case BINDER_SET_MAX_THREADS:
[// case BINDER_SET_MAX_THREADS
BINDER_SET_MAX_THREADS sets the size of the Binder thread pool.
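From user space, this command is issued as an ioctl on the opened /dev/binder descriptor. A minimal sketch, assuming the binder uapi header (linux/android/binder.h on recent kernels) for the BINDER_SET_MAX_THREADS definition:
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

// Open the Binder device and configure its thread pool size.
int open_binder_with_max_threads(size_t max_threads)
{
    int fd = open("/dev/binder", O_RDWR);
    if (fd < 0)
        return -1;
    // The driver simply copies this value into proc->max_threads.
    if (ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}
We will see the framework do exactly this in open_driver() later.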
]// case BINDER_SET_MAX_THREADS
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
[// if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads)))
As we can see, the implementation of BINDER_SET_MAX_THREADS is trivial: it just copies the value into the max_threads field of binder_proc.
]// if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads)))
ret = -EINVAL;
goto err;
}
break;
case BINDER_SET_CONTEXT_MGR:
[// case BINDER_SET_CONTEXT_MGR
BINDER_SET_CONTEXT_MGR registers the current process as the Service Manager (the context manager).
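For reference, servicemanager issues this command on itself at startup; a hedged sketch mirroring binder_become_context_manager() in service_manager.c:
#include <sys/ioctl.h>
#include <linux/android/binder.h>

// Register the calling process as the Binder context manager.
// The argument is unused by this command; 0 is passed by convention.
int become_context_manager(int binder_fd)
{
    return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
}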
]// case BINDER_SET_CONTEXT_MGR
if (binder_context_mgr_node != NULL) {
printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto err;
}
[// if (binder_context_mgr_node != NULL)
binder_context_mgr_node is a static global variable that points to the Binder node (entity object) of the global Service Manager.
If it is not NULL, a Service Manager has already been set up.
static struct binder_node *binder_context_mgr_node;
]// if (binder_context_mgr_node != NULL)
if (binder_context_mgr_uid != -1) {
if (binder_context_mgr_uid != current->cred->euid) {
printk(KERN_ERR "binder: BINDER_SET_"
"CONTEXT_MGR bad uid %d != %d\n",
current->cred->euid,
binder_context_mgr_uid);
ret = -EPERM;
goto err;
}
} else
binder_context_mgr_uid = current->cred->euid;
[// if (binder_context_mgr_uid != -1)
binder_context_mgr_uid is also a static global variable; it stores the effective UID (not the PID) of the Service Manager process. It is defined as follows:
static uid_t binder_context_mgr_uid = -1;
When binder_context_mgr_uid is not -1, a Service Manager has already been registered.
In that case the driver checks whether the current process is the one that registered the Service Manager, i.e. it performs a legitimacy check.
]// if (binder_context_mgr_uid != -1)
binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
[// binder_context_mgr_node = binder_new_node(proc, NULL, NULL)
If execution reaches this point, binder_new_node is called to create a Binder node (entity object).
binder_new_node is implemented as follows:
static struct binder_node *binder_new_node(struct binder_proc *proc,
void __user *ptr,
void __user *cookie)
[// static struct binder_node *binder_new_node(struct binder_proc *proc,
The first parameter, proc, describes the Service Manager process.
The second parameter, ptr, describes a Binder local object: it holds the address of the weak reference counter object inside that local object.
The third parameter, cookie, also describes the Binder local object: it holds the address of the local object itself.
]// static struct binder_node *binder_new_node(struct binder_proc *proc,
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
while (*p) {
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
if (ptr < node->ptr)
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
else
return NULL;
}
[// while (*p)
This while loop walks the red-black tree, keyed on ptr, to check whether a Binder node has already been created for the local object described by ptr and cookie.
]// while (*p)
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (node == NULL)
return NULL;
binder_stats_created(BINDER_STAT_NODE);
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
node->debug_id = ++binder_last_id;
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"binder: %d:%d node %d u%p c%p created\n",
proc->pid, current->pid, node->debug_id,
node->ptr, node->cookie);
return node;
[// return node;
The code above creates a Binder node, initializes it, and inserts it into the red-black tree rooted at the nodes member of its host process.
]// return node;
}
]// binder_context_mgr_node = binder_new_node(proc, NULL, NULL)
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto err;
}
binder_context_mgr_node->local_weak_refs++;
binder_context_mgr_node->local_strong_refs++;
binder_context_mgr_node->has_strong_ref = 1;
binder_context_mgr_node->has_weak_ref = 1;
[// binder_context_mgr_node->has_weak_ref = 1
These four lines maintain the node's reference counters.
]// binder_context_mgr_node->has_weak_ref = 1
break;
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
proc->pid, thread->pid);
binder_free_thread(proc, thread);
thread = NULL;
break;
case BINDER_VERSION:
if (size != sizeof(struct binder_version)) {
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
default:
ret = -EINVAL;
goto err;
}
ret = 0;
err:
if (thread)
thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
[// if (thread)
This line clears the BINDER_LOOPER_STATE_NEED_RETURN bit in the current thread's state, so that the next time the thread enters the Binder driver, the driver can dispatch inter-process communication requests to it.
]// if (thread)
mutex_unlock(&binder_lock);
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
return ret;
}
The analysis so far has focused on the Binder driver. In the Frameworks layer, Android wraps the individual Binder driver operations into a Binder library, so that processes can conveniently perform inter-process communication through the interfaces it provides.
Next, we analyze the implementation of this Binder library.
In the Binder library, Service components and Client components are described by the template classes BnInterface and BpInterface respectively; the former is called the Binder local object and the latter the Binder proxy object.
BnInterface and BpInterface correspond to the Binder entity objects and Binder reference objects inside the driver.
Both template classes are defined in frameworks/base/include/binder/IInterface.h, as follows:
template<typename INTERFACE>
class BnInterface : public INTERFACE, public BBinder
{
public:
virtual sp<IInterface> queryLocalInterface(const String16& _descriptor);
virtual const String16& getInterfaceDescriptor() const;
protected:
virtual IBinder* onAsBinder();
};
template<typename INTERFACE>
class BpInterface : public INTERFACE, public BpRefBase
{
public:
BpInterface(const sp<IBinder>& remote);
[// BpInterface(const sp<IBinder>& remote)
BpInterface's constructor is implemented as follows:
template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
: BpRefBase(remote)
{
}
]// BpInterface(const sp<IBinder>& remote)
protected:
virtual IBinder* onAsBinder();
};
BnInterface and BpInterface are both template classes whose template parameter INTERFACE must derive from the IInterface interface, which is likewise defined in frameworks/base/include/binder/IInterface.h.
Moreover, BnInterface and BpInterface themselves also inherit from INTERFACE — a rather clever piece of design!
IInterface is defined as follows:
class IInterface : public virtual RefBase
[// class IInterface : public virtual RefBase
As we can see, IInterface inherits from RefBase, so its objects can be managed through smart pointers.
]// class IInterface : public virtual RefBase
{
public:
IInterface();
sp<IBinder> asBinder();
sp<const IBinder> asBinder() const;
protected:
virtual ~IInterface();
virtual IBinder* onAsBinder() = 0;
};
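To make the roles of these classes concrete, here is a hedged sketch of how a custom interface is typically declared on top of IInterface. IHello, HELLO_TRANSACTION and sayHello are illustrative names; DECLARE_META_INTERFACE is the standard helper macro from IInterface.h:
// Hypothetical interface built on IInterface (names are illustrative).
class IHello : public IInterface
{
public:
    DECLARE_META_INTERFACE(Hello);   // declares the descriptor and asInterface()
    enum { HELLO_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION };
    virtual void sayHello() = 0;     // the business method
};
A matching IMPLEMENT_META_INTERFACE(Hello, "...") in the .cpp file supplies the descriptor string.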
Let us first examine the design of the Binder local object class BnInterface. From its declaration above we can see that, besides IInterface, it also inherits from BBinder, which provides the abstract inter-process communication interface for Binder local objects. BBinder is defined in frameworks/native/include/binder/Binder.h as follows:
class BBinder : public IBinder
[// class BBinder : public IBinder
BBinder has two important member functions: transact and onTransact.
When a Binder proxy object sends an inter-process communication request to a Binder local object through the Binder driver, the driver has the local object's transact member invoked to handle the request.
onTransact is implemented by BBinder's subclasses, i.e. the Binder local object classes, and dispatches the business-specific requests. In practice those requests are handled by subclasses of the Binder local object class, i.e. the Service component classes.
]// class BBinder : public IBinder
{
public:
BBinder();
virtual const String16& getInterfaceDescriptor() const;
virtual bool isBinderAlive() const;
virtual status_t pingBinder();
virtual status_t dump(int fd, const Vector<String16>& args);
virtual status_t transact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
virtual status_t linkToDeath(const sp<DeathRecipient>& recipient,
void* cookie = NULL,
uint32_t flags = 0);
virtual status_t unlinkToDeath( const wp<DeathRecipient>& recipient,
void* cookie = NULL,
uint32_t flags = 0,
wp<DeathRecipient>* outRecipient = NULL);
virtual void attachObject( const void* objectID,
void* object,
void* cleanupCookie,
object_cleanup_func func);
virtual void* findObject(const void* objectID) const;
virtual void detachObject(const void* objectID);
virtual BBinder* localBinder();
protected:
virtual ~BBinder();
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
private:
BBinder(const BBinder& o);
BBinder& operator=(const BBinder& o);
class Extras;
Extras* mExtras;
void* mReserved0;
};
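Continuing the hypothetical IHello from above, the Service side derives from BnInterface<IHello> and overrides onTransact to unmarshal incoming requests — a sketch under those assumptions, not a definitive implementation:
// Hypothetical Service-side class: BnInterface<IHello> brings in BBinder.
class BnHello : public BnInterface<IHello>
{
protected:
    virtual status_t onTransact(uint32_t code, const Parcel& data,
                                Parcel* reply, uint32_t flags = 0)
    {
        switch (code) {
        case HELLO_TRANSACTION:
            CHECK_INTERFACE(IHello, data, reply);  // validate the RPC header
            sayHello();                            // dispatch to the business logic
            return NO_ERROR;
        default:
            // Fall back to BBinder for PING_TRANSACTION and friends.
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};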
IBinder is defined in frameworks/native/include/binder/IBinder.h as follows:
class IBinder : public virtual RefBase
{
public:
enum {
FIRST_CALL_TRANSACTION = 0x00000001,
LAST_CALL_TRANSACTION = 0x00ffffff,
PING_TRANSACTION = B_PACK_CHARS('_','P','N','G'),
DUMP_TRANSACTION = B_PACK_CHARS('_','D','M','P'),
INTERFACE_TRANSACTION = B_PACK_CHARS('_', 'N', 'T', 'F'),
SYSPROPS_TRANSACTION = B_PACK_CHARS('_', 'S', 'P', 'R'),
// Corresponds to TF_ONE_WAY -- an asynchronous call.
FLAG_ONEWAY = 0x00000001
};
IBinder();
/**
* Check if this IBinder implements the interface named by
* @a descriptor. If it does, the base pointer to it is returned,
* which you can safely static_cast<> to the concrete C++ interface.
*/
virtual sp<IInterface> queryLocalInterface(const String16& descriptor);
/**
* Return the canonical name of the interface provided by this IBinder
* object.
*/
virtual const String16& getInterfaceDescriptor() const = 0;
virtual bool isBinderAlive() const = 0;
virtual status_t pingBinder() = 0;
virtual status_t dump(int fd, const Vector<String16>& args) = 0;
virtual status_t transact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0) = 0;
/**
* This method allows you to add data that is transported through
* IPC along with your IBinder pointer. When implementing a Binder
* object, override it to write your desired data in to @a outData.
* You can then call getConstantData() on your IBinder to retrieve
* that data, from any process. You MUST return the number of bytes
* written in to the parcel (including padding).
*/
class DeathRecipient : public virtual RefBase
{
public:
virtual void binderDied(const wp<IBinder>& who) = 0;
};
/**
* Register the @a recipient for a notification if this binder
* goes away. If this binder object unexpectedly goes away
* (typically because its hosting process has been killed),
* then DeathRecipient::binderDied() will be called with a reference
* to this.
*
* The @a cookie is optional -- if non-NULL, it should be a
* memory address that you own (that is, you know it is unique).
*
* @note You will only receive death notifications for remote binders,
* as local binders by definition can't die without you dying as well.
* Trying to use this function on a local binder will result in an
* INVALID_OPERATION code being returned and nothing happening.
*
* @note This link always holds a weak reference to its recipient.
*
* @note You will only receive a weak reference to the dead
* binder. You should not try to promote this to a strong reference.
* (Nor should you need to, as there is nothing useful you can
* directly do with it now that it has passed on.)
*/
virtual status_t linkToDeath(const sp<DeathRecipient>& recipient,
void* cookie = NULL,
uint32_t flags = 0) = 0;
/**
* Remove a previously registered death notification.
* The @a recipient will no longer be called if this object
* dies. The @a cookie is optional. If non-NULL, you can
* supply a NULL @a recipient, and the recipient previously
* added with that cookie will be unlinked.
*/
virtual status_t unlinkToDeath( const wp<DeathRecipient>& recipient,
void* cookie = NULL,
uint32_t flags = 0,
wp<DeathRecipient>* outRecipient = NULL) = 0;
virtual bool checkSubclass(const void* subclassID) const;
typedef void (*object_cleanup_func)(const void* id, void* obj, void* cleanupCookie);
virtual void attachObject( const void* objectID,
void* object,
void* cleanupCookie,
object_cleanup_func func) = 0;
virtual void* findObject(const void* objectID) const = 0;
virtual void detachObject(const void* objectID) = 0;
virtual BBinder* localBinder();
virtual BpBinder* remoteBinder();
protected:
virtual ~IBinder();
private:
};
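The DeathRecipient mechanism above is used roughly as follows; MyDeathRecipient and watchBinder are illustrative names:
class MyDeathRecipient : public IBinder::DeathRecipient
{
public:
    virtual void binderDied(const wp<IBinder>& who)
    {
        // Invoked on a Binder thread when the remote process dies.
        ALOGW("remote binder died");
    }
};

void watchBinder(const sp<IBinder>& binder, const sp<MyDeathRecipient>& recipient)
{
    // Only meaningful for remote binders; a local BBinder returns INVALID_OPERATION.
    binder->linkToDeath(recipient);
}
Note that the caller must keep a strong reference to the recipient, since the link itself only holds a weak one.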
Next we examine the design of BpInterface. Besides IInterface, BpInterface also inherits from BpRefBase, which provides the abstract inter-process communication interface for Binder proxy objects. BpRefBase is declared in frameworks/native/include/binder/Binder.h:
class BpRefBase : public virtual RefBase
{
protected:
BpRefBase(const sp<IBinder>& o);
[// BpRefBase(const sp<IBinder>& o)
BpRefBase's constructor is implemented as follows:
BpRefBase::BpRefBase(const sp<IBinder>& o)
: mRemote(o.get()), mRefs(NULL), mState(0)
[// mRemote(o.get())
This initializes the mRemote member variable.
]// mRemote(o.get())
{
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
if (mRemote) {
mRemote->incStrong(this); // Removed on first IncStrong().
mRefs = mRemote->createWeak(this); // Held for our entire lifetime.
}
}
]// BpRefBase(const sp<IBinder>& o)
virtual ~BpRefBase();
virtual void onFirstRef();
virtual void onLastStrongRef(const void* id);
virtual bool onIncStrongAttempted(uint32_t flags, const void* id);
inline IBinder* remote() { return mRemote; }
inline IBinder* remote() const { return mRemote; }
private:
BpRefBase(const BpRefBase& o);
BpRefBase& operator=(const BpRefBase& o);
IBinder* const mRemote;
[// IBinder* const mRemote
mRemote points to a Binder proxy object; concretely, it is an object of type BpBinder.
]// IBinder* const mRemote
RefBase::weakref_type* mRefs;
volatile int32_t mState;
};
From the declaration of BpRefBase we can see that it also inherits from RefBase.
To summarize the inheritance hierarchies of BnInterface and BpInterface:
BnInterface -> (INTERFACE -> IInterface -> RefBase, BBinder -> IBinder -> RefBase)
BpInterface -> (INTERFACE -> IInterface -> RefBase, BpRefBase -> RefBase)
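On the proxy side, and again continuing the hypothetical IHello, the Client class derives from BpInterface<IHello>; each method marshals its arguments and forwards them through remote()->transact() — a sketch under those assumptions:
// Hypothetical Client-side proxy: BpInterface<IHello> provides remote().
class BpHello : public BpInterface<IHello>
{
public:
    BpHello(const sp<IBinder>& impl) : BpInterface<IHello>(impl) { }

    virtual void sayHello()
    {
        Parcel data, reply;
        data.writeInterfaceToken(IHello::getInterfaceDescriptor());
        // remote() returns the mRemote BpBinder initialized by BpRefBase.
        remote()->transact(HELLO_TRANSACTION, data, &reply);
    }
};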
Moving on: the Binder library also contains two very important classes, IPCThreadState and ProcessState.
These two classes are responsible for interacting with the Binder driver.
When implementing your own Service and Client components, the usual flow is to first register the Service component with the Service Manager and then call:
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
These two calls start the Binder thread pool, signalling that the process is ready to handle Binder inter-process communication requests, as the sketch below shows.
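As a concrete anchor for the discussion, here is a minimal sketch of such a service process. HelloService and the "hello" name are hypothetical; defaultServiceManager, addService, startThreadPool and joinThreadPool are the real APIs:
// Hedged sketch of a typical native service process.
int main()
{
    sp<ProcessState> proc(ProcessState::self());      // opens /dev/binder and mmaps it
    // HelloService (hypothetical) would implement sayHello() on top of BnHello.
    defaultServiceManager()->addService(String16("hello"), new HelloService());
    ProcessState::self()->startThreadPool();          // spawn the first pooled Binder thread
    IPCThreadState::self()->joinThreadPool();         // the main thread joins the pool too
    return 0;
}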
We will analyze the Frameworks Binder library starting from these two calls.
Let us first look at the declaration of the ProcessState class:
class ProcessState : public virtual RefBase
{
public:
static sp<ProcessState> self();
void setContextObject(const sp<IBinder>& object);
sp<IBinder> getContextObject(const sp<IBinder>& caller);
void setContextObject(const sp<IBinder>& object,
const String16& name);
sp<IBinder> getContextObject(const String16& name,
const sp<IBinder>& caller);
void startThreadPool();
typedef bool (*context_check_func)(const String16& name,
const sp<IBinder>& caller,
void* userData);
bool isContextManager(void) const;
bool becomeContextManager(
context_check_func checkFunc,
void* userData);
sp<IBinder> getStrongProxyForHandle(int32_t handle);
wp<IBinder> getWeakProxyForHandle(int32_t handle);
void expungeHandle(int32_t handle, IBinder* binder);
void setArgs(int argc, const char* const argv[]);
int getArgC() const;
const char* const* getArgV() const;
void setArgV0(const char* txt);
void spawnPooledThread(bool isMain);
status_t setThreadPoolMaxThreadCount(size_t maxThreads);
private:
friend class IPCThreadState;
ProcessState();
~ProcessState();
ProcessState(const ProcessState& o);
ProcessState& operator=(const ProcessState& o);
struct handle_entry {
IBinder* binder;
RefBase::weakref_type* refs;
};
handle_entry* lookupHandleLocked(int32_t handle);
int mDriverFD;
[// int mDriverFD
mDriverFD holds the file descriptor obtained by opening /dev/binder; all interaction with the Binder driver goes through this descriptor.
]// int mDriverFD
void* mVMStart;
mutable Mutex mLock; // protects everything below.
Vector<handle_entry>mHandleToObject;
bool mManagesContexts;
context_check_func mBinderContextCheckFunc;
void* mBinderContextUserData;
KeyedVector<String16, sp<IBinder> >
mContexts;
String8 mRootDir;
bool mThreadPoolStarted;
[// bool mThreadPoolStarted
mThreadPoolStarted indicates whether the Binder thread pool has been started.
]// bool mThreadPoolStarted
volatile int32_t mThreadPoolSeq;
};
We begin our analysis of ProcessState's design and implementation with its self() function:
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState;
[// gProcess = new ProcessState
This constructs a ProcessState object and assigns it to the global variable gProcess. Let us look at the implementation of ProcessState's constructor:
ProcessState::ProcessState()
: mDriverFD(open_driver())
[// mDriverFD(open_driver())
mDriverFD is initialized through a call to open_driver.
open_driver opens the /dev/binder file and performs some initialization work:
static int open_driver()
{
int fd = open("/dev/binder", O_RDWR);
[// int fd = open("/dev/binder", O_RDWR)
Open the /dev/binder file.
]// int fd = open("/dev/binder", O_RDWR)
if (fd >= 0) {
fcntl(fd, F_SETFD, FD_CLOEXEC);
int vers;
status_t result = ioctl(fd, BINDER_VERSION, &vers);
[// status_t result = ioctl(fd, BINDER_VERSION, &vers)
This ioctl first obtains the Binder protocol version from the Binder driver; the call traps into the driver's binder_ioctl function.
]// status_t result = ioctl(fd, BINDER_VERSION, &vers)
if (result == -1) {
ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
close(fd);
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
ALOGE("Binder driver protocol does not match user space protocol!");
close(fd);
fd = -1;
}
[// if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION)
The code above validates the Binder protocol version.
]// if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION)
size_t maxThreads = 15;
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
[// result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads)
This sets the size of the Binder thread pool; as we can see, the default pool size is 15.
]// result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads)
if (result == -1) {
ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
}
} else {
ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
}
return fd;
}
]// mDriverFD(open_driver())
, mVMStart(MAP_FAILED)
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
if (mDriverFD >= 0) {
// XXX Ideally, there should be a specific define for whether we
// have mmap (or whether we could possibly have the kernel module
// availabla).
#if !defined(HAVE_WIN32_IPC)
// mmap the binder, providing a chunk of virtual address space to receive transactions.
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
[// mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0)
This mmap call maps a chunk of virtual address space into which the Binder driver can deliver transaction data.
]// mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0)
if (mVMStart == MAP_FAILED) {
// *sigh*
ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
close(mDriverFD);
mDriverFD = -1;
}
#else
mDriverFD = -1;
#endif
}
LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}
]// gProcess = new ProcessState
return gProcess;
}
[// sp<ProcessState> ProcessState::self()
From the implementation of self() we can see that ProcessState is a singleton (one instance per process).
]// sp<ProcessState> ProcessState::self()
Having analyzed ProcessState::self(), let us look at the implementation of ProcessState::startThreadPool():
void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
spawnPooledThread(true);
[// spawnPooledThread(true)
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
int32_t s = android_atomic_add(1, &mThreadPoolSeq);
char buf[16];
snprintf(buf, sizeof(buf), "Binder_%X", s);
ALOGV("Spawning new pooled thread, name=%s\n", buf);
[// ALOGV("Spawning new pooled thread, name=%s\n", buf)
The snprintf above built the new thread's name ("Binder_%X"); this line merely logs it.
]// ALOGV("Spawning new pooled thread, name=%s\n", buf)
sp<Thread> t = new PoolThread(isMain);
[// sp<Thread> t = new PoolThread(isMain)
This constructs a PoolThread object. The PoolThread class is defined as follows:
class PoolThread : public Thread
{
public:
PoolThread(bool isMain)
: mIsMain(isMain)
[// PoolThread(bool isMain)
PoolThread's constructor is trivial — essentially an empty function.
For the constructor of its parent class Thread, see the Thread class below.
]// PoolThread(bool isMain)
{
}
protected:
virtual bool threadLoop()
[// virtual bool threadLoop()
threadLoop is a very important function: it is what the newly created thread actually runs.
]// virtual bool threadLoop()
{
IPCThreadState::self()->joinThreadPool(mIsMain);
[// IPCThreadState::self()->joinThreadPool(mIsMain)
This calls IPCThreadState's joinThreadPool function; see the analysis of the IPCThreadState class below.
]// IPCThreadState::self()->joinThreadPool(mIsMain)
return false;
}
const bool mIsMain;
};
PoolThread in turn derives from the Thread class, which is declared as follows:
class Thread : virtual public RefBase
{
public:
// Create a Thread object, but doesn't create or start the associated
// thread. See the run() method.
Thread(bool canCallJava = true);
[// Thread(bool canCallJava = true)
Thread's constructor is as follows:
Thread::Thread(bool canCallJava)
: mCanCallJava(canCallJava),
[// mCanCallJava(canCallJava)
As we can see, mCanCallJava defaults to true.
]// mCanCallJava(canCallJava)
mThread(thread_id_t(-1)),
mLock("Thread::mLock"),
mStatus(NO_ERROR),
mExitPending(false), mRunning(false)
#ifdef HAVE_ANDROID_OS
, mTid(-1)
#endif
{
}
]// Thread(bool canCallJava = true)
virtual ~Thread();
// Start the thread in threadLoop() which needs to be implemented.
virtual status_t run( const char* name = 0,
int32_t priority = PRIORITY_DEFAULT,
size_t stack = 0);
// Ask this object's thread to exit. This function is asynchronous, when the
// function returns the thread might still be running. Of course, this
// function can be called from a different thread.
virtual void requestExit();
// Good place to do one-time initializations
virtual status_t readyToRun();
// Call requestExit() and wait until this object's thread exits.
// BE VERY CAREFUL of deadlocks. In particular, it would be silly to call
// this function from this object's thread. Will return WOULD_BLOCK in
// that case.
status_t requestExitAndWait();
// Wait until this object's thread exits. Returns immediately if not yet running.
// Do not call from this object's thread; will return WOULD_BLOCK in that case.
status_t join();
#ifdef HAVE_ANDROID_OS
// Return the thread's kernel ID, same as the thread itself calling gettid() or
// androidGetTid(), or -1 if the thread is not running.
pid_t getTid() const;
#endif
protected:
// exitPending() returns true if requestExit() has been called.
bool exitPending() const;
private:
// Derived class must implement threadLoop(). The thread starts its life
// here. There are two ways of using the Thread object:
// 1) loop: if threadLoop() returns true, it will be called again if
// requestExit() wasn't called.
// 2) once: if threadLoop() returns false, the thread will exit upon return.
virtual bool threadLoop() = 0;
private:
Thread& operator=(const Thread&);
static int _threadLoop(void* user);
const bool mCanCallJava;
// always hold mLock when reading or writing
thread_id_t mThread;
mutable Mutex mLock;
Condition mThreadExitedCondition;
status_t mStatus;
// note that all accesses of mExitPending and mRunning need to hold mLock
volatile bool mExitPending;
volatile bool mRunning;
sp<Thread> mHoldSelf;
#ifdef HAVE_ANDROID_OS
// legacy for debugging, not used by getTid() as it is set by the child thread
// and so is not initialized until the child reaches that point
pid_t mTid;
#endif
};
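Outside of Binder, Thread is used exactly the way PoolThread uses it; a tiny hypothetical sketch:
class TickThread : public Thread
{
protected:
    virtual bool threadLoop()
    {
        ALOGD("tick");
        sleep(1);
        return true;   // returning true re-runs threadLoop until requestExit()
    }
};
// Usage: sp<TickThread> t = new TickThread(); t->run("TickThread");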
Thread inherits virtually from RefBase, so PoolThread objects can be managed with smart pointers — hence the sp<Thread> above.
]// sp<Thread> t = new PoolThread(isMain)
t->run(buf);
[// t->run(buf)
After constructing the PoolThread object, run() is called to start the thread; run() is inherited from the Thread class:
status_t Thread::run(const char* name, int32_t priority, size_t stack)
{
Mutex::Autolock _l(mLock);
if (mRunning) {
// thread already started
return INVALID_OPERATION;
}
// reset status and exitPending to their default value, so we can
// try again after an error happened (either below, or in readyToRun())
mStatus = NO_ERROR;
mExitPending = false;
mThread = thread_id_t(-1);
// hold a strong reference on ourself
mHoldSelf = this;
mRunning = true;
bool res;
if (mCanCallJava) {
res = createThreadEtc(_threadLoop, this, name, priority, stack, &mThread);
[// res = createThreadEtc(_threadLoop, this, name, priority, stack, &mThread)
Here createThreadEtc is called. It is an inline helper declared alongside the Thread class in the utils headers (utils/threads.h, later utils/AndroidThreads.h) that forwards to androidCreateThreadEtc: it creates a new native thread, which then runs _threadLoop.
_threadLoop is defined in the Thread class:
int Thread::_threadLoop(void* user)
{
Thread* const self = static_cast<Thread*>(user);
sp<Thread> strong(self->mHoldSelf);
wp<Thread> weak(strong);
self->mHoldSelf.clear();
#ifdef HAVE_ANDROID_OS
// this is very useful for debugging with gdb
self->mTid = gettid();
#endif
bool first = true;
do {
bool result;
if (first) {
first = false;
self->mStatus = self->readyToRun();
[// self->mStatus = self->readyToRun()
On the thread's first iteration, readyToRun is called. Its default implementation is:
status_t Thread::readyToRun()
{
return NO_ERROR;
}
readyToRun is normally overridden by subclasses.
]// self->mStatus = self->readyToRun()
result = (self->mStatus == NO_ERROR);
if (result && !self->exitPending()) {
// Binder threads (and maybe others) rely on threadLoop
// running at least once after a successful ::readyToRun()
// (unless, of course, the thread has already been asked to exit
// at that point).
// This is because threads are essentially used like this:
// (new ThreadSubclass())->run();
// The caller therefore does not retain a strong reference to
// the thread and the thread would simply disappear after the
// successful ::readyToRun() call instead of entering the
// threadLoop at least once.
result = self->threadLoop();
[// result = self->threadLoop()
After readyToRun, threadLoop is invoked.
threadLoop is a virtual function; here it resolves to PoolThread's threadLoop.
]// result = self->threadLoop()
}
} else {
result = self->threadLoop();
}
// establish a scope for mLock
{
Mutex::Autolock _l(self->mLock);
if (result == false || self->mExitPending) {
self->mExitPending = true;
self->mRunning = false;
// clear thread ID so that requestExitAndWait() does not exit if
// called by a new thread using the same thread ID as this one.
self->mThread = thread_id_t(-1);
// note that interested observers blocked in requestExitAndWait are
// awoken by broadcast, but blocked on mLock until break exits scope
self->mThreadExitedCondition.broadcast();
break;
}
}
// Release our strong reference, to let a chance to the thread
// to die a peaceful death.
strong.clear();
// And immediately, re-acquire a strong reference for the next loop
strong = weak.promote();
} while(strong != 0);
[// while(strong != 0)
As we can see, the created thread keeps invoking threadLoop() in this loop.
NOTE: for PoolThread, the loop body blocks inside joinThreadPool(), which contains its own command loop, so a pooled Binder thread effectively spends its entire lifetime here.
]// while(strong != 0)
return 0;
}
]// res = createThreadEtc(_threadLoop, this, name, priority, stack, &mThread)
} else {
res = androidCreateRawThreadEtc(_threadLoop, this, name, priority, stack, &mThread);
}
if (res == false) {
mStatus = UNKNOWN_ERROR; // something happened!
mRunning = false;
mThread = thread_id_t(-1);
mHoldSelf.clear(); // "this" may have gone away after this.
return UNKNOWN_ERROR;
}
// Do not refer to mStatus here: The thread is already running (may, in fact
// already have exited with a valid mStatus result). The NO_ERROR indication
// here merely indicates successfully starting the thread and does not
// imply successful termination/execution.
return NO_ERROR;
// Exiting scope of mLock is a memory barrier and allows new thread to run
}
]// t->run(buf)
}
}
]// spawnPooledThread(true)
}
[// if (!mThreadPoolStarted)
If the pool has not been started yet, mThreadPoolStarted is set to true to record that fact, and spawnPooledThread is called to start a Binder thread that handles the interaction with the Binder driver.
]// if (!mThreadPoolStarted)
}
Having analyzed the design of ProcessState, we now turn to the design and implementation of IPCThreadState.
First, the declaration of the IPCThreadState class:
class IPCThreadState
{
public:
static IPCThreadState* self();
static IPCThreadState* selfOrNull(); // self(), but won't instantiate
sp<ProcessState> process();
status_t clearLastError();
pid_t getCallingPid() const;
uid_t getCallingUid() const;
void setStrictModePolicy(int32_t policy);
int32_t getStrictModePolicy() const;
void setLastTransactionBinderFlags(int32_t flags);
int32_t getLastTransactionBinderFlags() const;
int64_t clearCallingIdentity();
void restoreCallingIdentity(int64_t token);
int setupPolling(int* fd);
status_t handlePolledCommands();
void flushCommands();
void joinThreadPool(bool isMain = true);
// Stop the local process.
void stopProcess(bool immediate = true);
status_t transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags);
void incStrongHandle(int32_t handle);
void decStrongHandle(int32_t handle);
void incWeakHandle(int32_t handle);
void decWeakHandle(int32_t handle);
status_t attemptIncStrongHandle(int32_t handle);
static void expungeHandle(int32_t handle, IBinder* binder);
status_t requestDeathNotification( int32_t handle,
BpBinder* proxy);
status_t clearDeathNotification( int32_t handle,
BpBinder* proxy);
static void shutdown();
// Call this to disable switching threads to background scheduling when
// receiving incoming IPC calls. This is specifically here for the
// Android system process, since it expects to have background apps calling
// in to it but doesn't want to acquire locks in its services while in
// the background.
static void disableBackgroundScheduling(bool disable);
private:
IPCThreadState();
~IPCThreadState();
status_t sendReply(const Parcel& reply, uint32_t flags);
status_t waitForResponse(Parcel *reply,
status_t *acquireResult=NULL);
status_t talkWithDriver(bool doReceive=true);
status_t writeTransactionData(int32_t cmd,
uint32_t binderFlags,
int32_t handle,
uint32_t code,
const Parcel& data,
status_t* statusBuffer);
status_t getAndExecuteCommand();
status_t executeCommand(int32_t command);
void processPendingDerefs();
void clearCaller();
static void threadDestructor(void *st);
static void freeBuffer(Parcel* parcel,
const uint8_t* data, size_t dataSize,
const binder_size_t* objects, size_t objectsSize,
void* cookie);
const sp<ProcessState> mProcess;
[// const sp<ProcessState> mProcess
mProcess holds a strong reference to the host process's ProcessState object.
]// const sp<ProcessState> mProcess
const pid_t mMyThreadId;
Vector<BBinder*> mPendingStrongDerefs;
Vector<RefBase::weakref_type*> mPendingWeakDerefs;
Parcel mIn;
Parcel mOut;
[// Parcel mOut
mIn and mOut hold the data exchanged with the driver (incoming and outgoing respectively).
Both are of type Parcel, which is declared as follows:
class Parcel
{
public:
class ReadableBlob;
class WritableBlob;
Parcel();
~Parcel();
const uint8_t* data() const;
size_t dataSize() const;
size_t dataAvail() const;
size_t dataPosition() const;
size_t dataCapacity() const;
status_t setDataSize(size_t size);
void setDataPosition(size_t pos) const;
status_t setDataCapacity(size_t size);
status_t setData(const uint8_t* buffer, size_t len);
status_t appendFrom(const Parcel *parcel,
size_t start, size_t len);
bool pushAllowFds(bool allowFds);
void restoreAllowFds(bool lastValue);
bool hasFileDescriptors() const;
// Writes the RPC header.
status_t writeInterfaceToken(const String16& interface);
// Parses the RPC header, returning true if the interface name
// in the header matches the expected interface from the caller.
//
// Additionally, enforceInterface does part of the work of
// propagating the StrictMode policy mask, populating the current
// IPCThreadState, which as an optimization may optionally be
// passed in.
bool enforceInterface(const String16& interface,
IPCThreadState* threadState = NULL) const;
bool checkInterface(IBinder*) const;
void freeData();
const size_t* objects() const;
size_t objectsCount() const;
status_t errorCheck() const;
void setError(status_t err);
status_t write(const void* data, size_t len);
void* writeInplace(size_t len);
status_t writeUnpadded(const void* data, size_t len);
status_t writeInt32(int32_t val);
status_t writeInt64(int64_t val);
status_t writeFloat(float val);
status_t writeDouble(double val);
status_t writeIntPtr(intptr_t val);
status_t writeCString(const char* str);
status_t writeString8(const String8& str);
status_t writeString16(const String16& str);
status_t writeString16(const char16_t* str, size_t len);
status_t writeStrongBinder(const sp<IBinder>& val);
status_t writeWeakBinder(const wp<IBinder>& val);
status_t write(const Flattenable& val);
template<typename T>
status_t write(const LightFlattenable<T>& val);
// Place a native_handle into the parcel (the native_handle's file-
// descriptors are dup'ed, so it is safe to delete the native_handle
// when this function returns).
// Doesn't take ownership of the native_handle.
status_t writeNativeHandle(const native_handle* handle);
// Place a file descriptor into the parcel. The given fd must remain
// valid for the lifetime of the parcel.
// The Parcel does not take ownership of the given fd unless you ask it to.
status_t writeFileDescriptor(int fd, bool takeOwnership = false);
// Place a file descriptor into the parcel. A dup of the fd is made, which
// will be closed once the parcel is destroyed.
status_t writeDupFileDescriptor(int fd);
// Writes a blob to the parcel.
// If the blob is small, then it is stored in-place, otherwise it is
// transferred by way of an anonymous shared memory region.
// The caller should call release() on the blob after writing its contents.
status_t writeBlob(size_t len, WritableBlob* outBlob);
status_t writeObject(const flat_binder_object& val, bool nullMetaData);
// Like Parcel.java's writeNoException(). Just writes a zero int32.
// Currently the native implementation doesn't do any of the StrictMode
// stack gathering and serialization that the Java implementation does.
status_t writeNoException();
void remove(size_t start, size_t amt);
status_t read(void* outData, size_t len) const;
const void* readInplace(size_t len) const;
int32_t readInt32() const;
status_t readInt32(int32_t *pArg) const;
int64_t readInt64() const;
status_t readInt64(int64_t *pArg) const;
float readFloat() const;
status_t readFloat(float *pArg) const;
double readDouble() const;
status_t readDouble(double *pArg) const;
intptr_t readIntPtr() const;
status_t readIntPtr(intptr_t *pArg) const;
const char* readCString() const;
String8 readString8() const;
String16 readString16() const;
const char16_t* readString16Inplace(size_t* outLen) const;
sp<IBinder> readStrongBinder() const;
wp<IBinder> readWeakBinder() const;
status_t read(Flattenable& val) const;
template<typename T>
status_t read(LightFlattenable<T>& val) const;
// Like Parcel.java's readExceptionCode(). Reads the first int32
// off of a Parcel's header, returning 0 or the negative error
// code on exceptions, but also deals with skipping over rich
// response headers. Callers should use this to read & parse the
// response headers rather than doing it by hand.
int32_t readExceptionCode() const;
// Retrieve native_handle from the parcel. This returns a copy of the
// parcel's native_handle (the caller takes ownership). The caller
// must free the native_handle with native_handle_close() and
// native_handle_delete().
native_handle* readNativeHandle() const;
// Retrieve a file descriptor from the parcel. This returns the raw fd
// in the parcel, which you do not own -- use dup() to get your own copy.
int readFileDescriptor() const;
// Reads a blob from the parcel.
// The caller should call release() on the blob after reading its contents.
status_t readBlob(size_t len, ReadableBlob* outBlob) const;
const flat_binder_object* readObject(bool nullMetaData) const;
// Explicitly close all file descriptors in the parcel.
void closeFileDescriptors();
typedef void (*release_func)(Parcel* parcel,
const uint8_t* data, size_t dataSize,
const size_t* objects, size_t objectsSize,
void* cookie);
const uint8_t* ipcData() const;
size_t ipcDataSize() const;
const size_t* ipcObjects() const;
size_t ipcObjectsCount() const;
void ipcSetDataReference(const uint8_t* data, size_t dataSize,
const size_t* objects, size_t objectsCount,
release_func relFunc, void* relCookie);
void print(TextOutput& to, uint32_t flags = 0) const;
private:
Parcel(const Parcel& o);
Parcel& operator=(const Parcel& o);
status_t finishWrite(size_t len);
void releaseObjects();
void acquireObjects();
status_t growData(size_t len);
status_t restartWrite(size_t desired);
status_t continueWrite(size_t desired);
void freeDataNoInit();
void initState();
void scanForFds() const;
template<class T>
status_t readAligned(T *pArg) const;
template<class T> T readAligned() const;
template<class T>
status_t writeAligned(T val);
status_t mError;
uint8_t* mData;
[// uint8_t* mData
mData points to the buffer that holds the data.
]// uint8_t* mData
size_t mDataSize;
[// size_t mDataSize
mDataSize is the size of the data currently stored.
]// size_t mDataSize
size_t mDataCapacity;
[// size_t mDataCapacity
mDataCapacity is the capacity of the data buffer.
]// size_t mDataCapacity
mutable size_t mDataPos;
[// mutable size_t mDataPos
mDataPos is the current read/write cursor within the buffer.
]// mutable size_t mDataPos
size_t* mObjects;
size_t mObjectsSize;
size_t mObjectsCapacity;
mutable size_t mNextObjectHint;
mutable bool mFdsKnown;
mutable bool mHasFds;
bool mAllowFds;
release_func mOwner;
void* mOwnerCookie;
class Blob {
public:
Blob();
~Blob();
void release();
inline size_t size() const { return mSize; }
protected:
void init(bool mapped, void* data, size_t size);
void clear();
bool mMapped;
void* mData;
size_t mSize;
};
public:
class ReadableBlob : public Blob {
friend class Parcel;
public:
inline const void* data() const { return mData; }
};
class WritableBlob : public Blob {
friend class Parcel;
public:
inline void* data() { return mData; }
};
};
Here we will just look at the implementation of Parcel's constructor:
Parcel::Parcel()
{
LOG_ALLOC("Parcel %p: constructing", this);
initState();
[// initState()
This calls initState to initialize the Parcel's state:
void Parcel::initState()
{
LOG_ALLOC("Parcel %p: initState", this);
mError = NO_ERROR;
mData = 0;
mDataSize = 0;
mDataCapacity = 0;
mDataPos = 0;
ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
mObjects = NULL;
mObjectsSize = 0;
mObjectsCapacity = 0;
mNextObjectHint = 0;
mHasFds = false;
mFdsKnown = true;
mAllowFds = true;
mOwner = NULL;
}
]// initState()
}
]// Parcel mOut
status_t mLastError;
pid_t mCallingPid;
uid_t mCallingUid;
int32_t mStrictModePolicy;
int32_t mLastTransactionBinderFlags;
};
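Before following self(), it helps to see how a Parcel such as mIn or mOut behaves in isolation — a minimal round-trip sketch:
Parcel p;
p.writeInt32(42);
p.writeString16(String16("hello"));

p.setDataPosition(0);              // rewind the cursor before reading
int32_t num = p.readInt32();       // 42
String16 str = p.readString16();   // "hello"
The same cursor discipline (setDataPosition(0) before reading) shows up below when talkWithDriver hands mIn back to the command loop.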
As with ProcessState, we start the analysis from IPCThreadState's self function:
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
return new IPCThreadState;
[// return new IPCThreadState
Here IPCThreadState's constructor is invoked to create a new IPCThreadState object.
The constructor is as follows:
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),
[// mProcess(ProcessState::self())
This stores the host process's ProcessState object.
]// mProcess(ProcessState::self())
mMyThreadId(gettid()),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0)
{
pthread_setspecific(gTLS, this);
[// pthread_setspecific(gTLS, this)
This stores the newly created IPCThreadState object into thread-local storage (TLS).
]// pthread_setspecific(gTLS, this)
clearCaller();
mIn.setDataCapacity(256);
[// mIn.setDataCapacity(256)
In the constructor, setDataCapacity is called first; it is defined as follows:
status_t Parcel::setDataCapacity(size_t size)
{
if (size > mDataCapacity) return continueWrite(size);
[// if (size > mDataCapacity) return continueWrite(size)
mDataCapacity holds the current capacity of the data buffer and starts out as 0,
so this calls continueWrite to (re)allocate the buffer:
status_t Parcel::continueWrite(size_t desired)
{
// If shrinking, first adjust for any objects that appear
// after the new data size.
size_t objectsSize = mObjectsSize;
[// size_t objectsSize = mObjectsSize
The member variable mObjectsSize is the number of entries in the data buffer's Binder object (offset) array; it is declared as:
size_t mObjectsSize;
]// size_t objectsSize = mObjectsSize
if (desired < mDataSize) {
if (desired == 0) {
objectsSize = 0;
} else {
while (objectsSize > 0) {
if (mObjects[objectsSize-1] < desired)
break;
objectsSize--;
}
}
}
[// if (desired < mDataSize)
Here desired is the target size and mDataSize the current data size.
If desired < mDataSize, the data buffer is being shrunk.
In that case, if desired is 0, objectsSize is simply set to 0; otherwise entries are trimmed from the end of the Binder object array until only those whose offsets fall below the new size remain.
]// if (desired < mDataSize)
if (mOwner) {
[// if (mOwner)
The member variable mOwner is a function pointer, declared as:
release_func mOwner;
release_func itself is declared as:
typedef void (*release_func)(Parcel* parcel,
const uint8_t* data, size_t dataSize,
const binder_size_t* objects, size_t objectsSize,
void* cookie);
]// if (mOwner)
// If the size is going to zero, just release the owner's data.
if (desired == 0) {
freeData();
return NO_ERROR;
}
// If there is a different owner, we need to take
// posession.
uint8_t* data = (uint8_t*)malloc(desired);
if (!data) {
mError = NO_MEMORY;
return NO_MEMORY;
}
binder_size_t* objects = NULL;
if (objectsSize) {
objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
if (!objects) {
free(data);
mError = NO_MEMORY;
return NO_MEMORY;
}
// Little hack to only acquire references on objects
// we will be keeping.
size_t oldObjectsSize = mObjectsSize;
mObjectsSize = objectsSize;
acquireObjects();
mObjectsSize = oldObjectsSize;
}
if (mData) {
memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
}
if (objects && mObjects) {
memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
}
//ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
mOwner = NULL;
LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
gParcelGlobalAllocCount++;
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mObjects = objects;
mDataSize = (mDataSize < desired) ? mDataSize : desired;
ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
mDataCapacity = desired;
mObjectsSize = mObjectsCapacity = objectsSize;
mNextObjectHint = 0;
} else if (mData) {
if (objectsSize < mObjectsSize) {
// Need to release refs on any objects we are dropping.
const sp<ProcessState> proc(ProcessState::self());
for (size_t i=objectsSize; i<mObjectsSize; i++) {
const flat_binder_object* flat
= reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
if (flat->type == BINDER_TYPE_FD) {
// will need to rescan because we may have lopped off the only FDs
mFdsKnown = false;
}
release_object(proc, *flat, this);
}
binder_size_t* objects =
(binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
if (objects) {
mObjects = objects;
}
mObjectsSize = objectsSize;
mNextObjectHint = 0;
}
// We own the data, so we can just do a realloc().
if (desired > mDataCapacity) {
uint8_t* data = (uint8_t*)realloc(mData, desired);
if (data) {
LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
desired);
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
gParcelGlobalAllocSize -= mDataCapacity;
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mDataCapacity = desired;
} else if (desired > mDataCapacity) {
mError = NO_MEMORY;
return NO_MEMORY;
}
} else {
if (mDataSize > desired) {
mDataSize = desired;
ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
}
if (mDataPos > desired) {
mDataPos = desired;
ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
}
}
} else {
// This is the first data. Easy!
uint8_t* data = (uint8_t*)malloc(desired);
[// uint8_t* data = (uint8_t*)malloc(desired)
This allocates a new chunk of memory, desired bytes in size.
]// uint8_t* data = (uint8_t*)malloc(desired)
if (!data) {
mError = NO_MEMORY;
return NO_MEMORY;
}
if(!(mDataCapacity == 0 && mObjects == NULL
&& mObjectsCapacity == 0)) {
ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
}
LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
gParcelGlobalAllocCount++;
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mDataSize = mDataPos = 0;
ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
mDataCapacity = desired;
}
return NO_ERROR;
}
]// if (size > mDataCapacity) return continueWrite(size)
return NO_ERROR;
}
]// mIn.setDataCapacity(256)
mOut.setDataCapacity(256);
}
]// return new IPCThreadState
}
if (gShutdown) return NULL;
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
if (pthread_key_create(&gTLS, threadDestructor) != 0) {
pthread_mutex_unlock(&gTLSMutex);
return NULL;
}
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
goto restart;
}
[// IPCThreadState* IPCThreadState::self()
From IPCThreadState::self() we can see that IPCThreadState is a per-thread singleton, built on thread-local storage (see the sketch below).
]// IPCThreadState* IPCThreadState::self()
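A stripped-down sketch of the same pthreads TLS idiom, with illustrative names:
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t gKey;
static pthread_once_t gOnce = PTHREAD_ONCE_INIT;

static void makeKey()
{
    // The destructor runs when a thread exits, freeing its private copy.
    pthread_key_create(&gKey, free);
}

void* threadSingleton(size_t size)
{
    pthread_once(&gOnce, makeKey);
    void* st = pthread_getspecific(gKey);
    if (st == NULL) {                  // first call on this thread
        st = calloc(1, size);
        pthread_setspecific(gKey, st); // cached for subsequent calls
    }
    return st;
}
IPCThreadState uses a mutex-guarded gHaveTLS flag instead of pthread_once, but the effect is the same: one instance per thread, created lazily.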
Once self() has returned the calling thread's IPCThreadState object, joinThreadPool can be called.
joinThreadPool is defined as follows:
void IPCThreadState::joinThreadPool(bool isMain)
[// void IPCThreadState::joinThreadPool(bool isMain)
joinThreadPool takes one parameter, isMain, whose default value is true.
]// void IPCThreadState::joinThreadPool(bool isMain)
{
LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
[// mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER)
This first queues a BC_ENTER_LOOPER command into mOut (BC_REGISTER_LOOPER for non-main threads), announcing this thread to the driver's thread pool.
Let us look at the implementation of Parcel's writeInt32:
status_t Parcel::writeInt32(int32_t val)
{
return writeAligned(val);
[// return writeAligned(val)
writeInt32 calls writeAligned, which is defined as follows:
template<class T> status_t Parcel::writeAligned(T val) {
COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T));
if ((mDataPos+sizeof(val)) <= mDataCapacity) {
restart_write:
*reinterpret_cast<T*>(mData+mDataPos) = val;
[// *reinterpret_cast<T*>(mData+mDataPos) = val
This writes the value at the current write position in the buffer.
]// *reinterpret_cast<T*>(mData+mDataPos) = val
return finishWrite(sizeof(val));
[// return finishWrite(sizeof(val))
After the value has been written, finishWrite is called to update the bookkeeping; the argument is the number of bytes just written:
status_t Parcel::finishWrite(size_t len)
{
//printf("Finish write of %d\n", len);
mDataPos += len;
[// mDataPos += len
Advance the write cursor.
]// mDataPos += len
ALOGV("finishWrite Setting data pos of %p to %d\n", this, mDataPos);
if (mDataPos > mDataSize) {
mDataSize = mDataPos;
[// mDataSize = mDataPos
If necessary, grow the recorded size of the stored data.
]// mDataSize = mDataPos
ALOGV("finishWrite Setting data size of %p to %d\n", this, mDataSize);
}
//printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
return NO_ERROR;
}
]// return finishWrite(sizeof(val))
}
[// if ((mDataPos+sizeof(val)) <= mDataCapacity)
(mDataPos + sizeof(val)) <= mDataCapacity means the remaining capacity is sufficient to hold the value.
]// if ((mDataPos+sizeof(val)) <= mDataCapacity)
status_t err = growData(sizeof(val));
[// status_t err = growData(sizeof(val))
If (mDataPos + sizeof(val)) > mDataCapacity, the current capacity is no longer sufficient, so growData is called to enlarge it.
growData is defined as follows:
status_t Parcel::growData(size_t len)
{
size_t newSize = ((mDataSize+len)*3)/2;
[// size_t newSize = ((mDataSize+len)*3)/2
This computes the new capacity: the current size plus the required length, scaled by 1.5.
]// size_t newSize = ((mDataSize+len)*3)/2
return (newSize <= mDataSize)
? (status_t) NO_MEMORY
: continueWrite(newSize);
[// return (newSize <= mDataSize)
This calls continueWrite to reallocate the buffer with the new capacity (NO_MEMORY is returned if the size computation overflowed).
]// return (newSize <= mDataSize)
}
]// status_t err = growData(sizeof(val))
if (err == NO_ERROR) goto restart_write;
return err;
}
]// return writeAligned(val)
}
]// mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER)
// This thread may have been spawned by a thread that was in the background
// scheduling group, so first we will make sure it is in the foreground
// one to avoid performing an initial transaction in the background.
set_sched_policy(mMyThreadId, SP_FOREGROUND);
status_t result;
do {
processPendingDerefs();
[// processPendingDerefs()
On entering the loop, processPendingDerefs is called first; it carries out the strong and weak reference decrements that were deferred earlier.
void IPCThreadState::processPendingDerefs()
{
if (mIn.dataPosition() >= mIn.dataSize()) {
size_t numPending = mPendingWeakDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
RefBase::weakref_type* refs = mPendingWeakDerefs[i];
refs->decWeak(mProcess.get());
}
mPendingWeakDerefs.clear();
}
numPending = mPendingStrongDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
BBinder* obj = mPendingStrongDerefs[i];
obj->decStrong(mProcess.get());
}
mPendingStrongDerefs.clear();
}
}
}
]// processPendingDerefs()
// now get the next command to be processed, waiting if necessary
result = getAndExecuteCommand();
[// result = getAndExecuteCommand()
This calls getAndExecuteCommand to interact with the Binder driver:
status_t IPCThreadState::getAndExecuteCommand()
{
status_t result;
int32_t cmd;
result = talkWithDriver();
[// result = talkWithDriver()
talkWithDriver sends the buffered commands to the Binder driver and retrieves its replies:
status_t IPCThreadState::talkWithDriver(bool doReceive)
[// status_t IPCThreadState::talkWithDriver(bool doReceive)
talkWithDriver's doReceive parameter has a default value of true.
]// status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
[// if (mProcess->mDriverFD <= 0)
mDriverFD is the file descriptor of the opened /dev/binder; all interaction with the Binder driver goes through it.
]// if (mProcess->mDriverFD <= 0)
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
[// const bool needRead = mIn.dataPosition() >= mIn.dataSize()
Parcel::dataPosition is defined as:
size_t Parcel::dataPosition() const
{
return mDataPos;
}
and Parcel::dataSize as:
size_t Parcel::dataSize() const
{
return (mDataSize > mDataPos ? mDataSize : mDataPos);
}
As we can see, dataSize returns the true size of the data.
needRead indicates whether we still need to read data from the Binder driver:
if mIn.dataPosition() < mIn.dataSize(), mIn still contains unconsumed data.
]// const bool needRead = mIn.dataPosition() >= mIn.dataSize()
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
[// const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0
This decides whether to hand output data to the Binder driver: we write only if doReceive is false or needRead is true.
(doReceive defaults to true, but callers such as flushCommands invoke talkWithDriver(false) to write without reading.)
If needRead is false, mIn still holds unconsumed data, so we refrain from sending further commands to the driver for now.
]// const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
[// bwr.write_buffer = (uintptr_t)mOut.data()
This fills in the size and address of the outgoing data.
]// bwr.write_buffer = (uintptr_t)mOut.data()
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
[// if (doReceive && needRead)
This prepares the read side: if we are allowed to read (doReceive && needRead), the read buffer is pointed at mIn's data buffer, with mIn's capacity as its size.
]// if (doReceive && needRead)
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
if (outAvail != 0) {
alog << "Sending commands to driver: " << indent;
const void* cmds = (const void*)bwr.write_buffer;
const void* end = ((const uint8_t*)cmds)+bwr.write_size;
alog << HexDump(cmds, bwr.write_size) << endl;
while (cmds < end) cmds = printCommand(alog, cmds);
alog << dedent;
}
alog << "Size of receive buffer: " << bwr.read_size
<< ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
[// if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR
If both the read size and the write size are 0, there is nothing to do, so we return immediately.
]// if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR
bwr.write_consumed = 0;
bwr.read_consumed = 0;
[// bwr.read_consumed = 0
These two lines reset the read and write consumed counts to 0 before interacting with the Binder driver.
]// bwr.read_consumed = 0
status_t err;
do {
IF_LOG_COMMANDS() {
alog << "About to read/write, write size = " << mOut.dataSize() << endl;
}
#if defined(HAVE_ANDROID_OS)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
[// if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
Here a single BINDER_WRITE_READ ioctl sends our commands to the Binder driver and fetches the commands the driver sends back.
]// if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
IF_LOG_COMMANDS() {
alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
}
} while (err == -EINTR);
IF_LOG_COMMANDS() {
alog << "Our err: " << (void*)(intptr_t)err << ", write consumed: "
<< bwr.write_consumed << " (of " << mOut.dataSize()
<< "), read consumed: " << bwr.read_consumed << endl;
}
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
[// if (bwr.write_consumed > 0)
bwr.write_consumed > 0 means the driver has consumed (some of) the commands the Binder library sent it.
]// if (bwr.write_consumed > 0)
if (bwr.write_consumed < mOut.dataSize())
[// if (bwr.write_consumed < mOut.dataSize())
bwr.write_consumed < mOut.dataSize() means the driver did not consume all of the commands we sent.
]// if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
[// mOut.remove(0, bwr.write_consumed)
remove is called to drop the consumed portion of the data; let us look at its implementation:
void Parcel::remove(size_t start, size_t amt)
{
LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
}
Surprisingly, remove is not actually implemented at all — it just aborts, so in practice the driver is expected to consume everything we send.
]// mOut.remove(0, bwr.write_consumed)
else
mOut.setDataSize(0);
[// mOut.setDataSize(0)
Calling setDataSize(0) effectively discards all the data.
setDataSize is implemented as follows:
status_t Parcel::setDataSize(size_t size)
{
status_t err;
err = continueWrite(size);
if (err == NO_ERROR) {
mDataSize = size;
ALOGV("setDataSize Setting data size of %p to %d\n", this, mDataSize);
}
return err;
}
]// mOut.setDataSize(0)
}
if (bwr.read_consumed > 0) {
[// if (bwr.read_consumed > 0)
bwr.read_consumed > 0 means the driver returned data; the commands it sent back must now be read.
]// if (bwr.read_consumed > 0)
mIn.setDataSize(bwr.read_consumed);
[// mIn.setDataSize(bwr.read_consumed)
This sets mIn's data size to read_consumed, the number of bytes the driver wrote back.
]// mIn.setDataSize(bwr.read_consumed)
mIn.setDataPosition(0);
[// mIn.setDataPosition(0)
setDataPosition(0) resets the cursor to 0, the start of the data.
]// mIn.setDataPosition(0)
}
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
alog << "Remaining data size: " << mOut.dataSize() << endl;
alog << "Received commands from driver: " << indent;
const void* cmds = mIn.data();
const void* end = mIn.data() + mIn.dataSize();
alog << HexDump(cmds, mIn.dataSize()) << endl;
while (cmds < end) cmds = printReturnCommand(alog, cmds);
alog << dedent;
}
return NO_ERROR;
}
return err;
}
]// result = talkWithDriver()
if (result >= NO_ERROR) {
size_t IN = mIn.dataAvail();
if (IN < sizeof(int32_t)) return result;
cmd = mIn.readInt32();
[// cmd = mIn.readInt32()
This fetches one command returned by the Binder driver.
Why only one? Because getAndExecuteCommand processes a single command per invocation; the do/while loop in joinThreadPool keeps calling it until mIn has been drained.
readInt32 is defined as follows:
int32_t Parcel::readInt32() const
{
return readAligned<int32_t>();
[// return readAligned<int32_t>()
template<class T> T Parcel::readAligned() const {
T result;
if (readAligned(&result) != NO_ERROR) {
[// if (readAligned(&result) != NO_ERROR)
The overload of readAligned it calls is defined as follows:
template<class T> status_t Parcel::readAligned(T *pArg) const {
COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T));
if ((mDataPos+sizeof(T)) <= mDataSize) {
const void* data = mData+mDataPos;
mDataPos += sizeof(T);
*pArg = *reinterpret_cast<const T*>(data);
return NO_ERROR;
} else {
return NOT_ENOUGH_DATA;
}
}
As its definition shows, readAligned is straightforward: it reads the value out and advances mDataPos to the position of the next item.
]// if (readAligned(&result) != NO_ERROR)
result = 0;
}
return result;
}
]// return readAligned<int32_t>()
}
]// cmd = mIn.readInt32()
IF_LOG_COMMANDS() {
alog << "Processing top-level Command: "
<< getReturnString(cmd) << endl;
}
result = executeCommand(cmd);
[// result = executeCommand(cmd)
executeCommand is then called to execute the command returned by the Binder driver.
executeCommand is implemented as follows:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
case BR_ERROR:
result = mIn.readInt32();
break;
case BR_OK:
break;
case BR_ACQUIRE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
ALOG_ASSERT(refs->refBase() == obj,
"BR_ACQUIRE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
obj->incStrong(mProcess.get());
IF_LOG_REMOTEREFS() {
LOG_REMOTEREFS("BR_ACQUIRE from driver on %p", obj);
obj->printRefs();
}
mOut.writeInt32(BC_ACQUIRE_DONE);
mOut.writePointer((uintptr_t)refs);
mOut.writePointer((uintptr_t)obj);
break;
case BR_RELEASE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
ALOG_ASSERT(refs->refBase() == obj,
"BR_RELEASE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
IF_LOG_REMOTEREFS() {
LOG_REMOTEREFS("BR_RELEASE from driver on %p", obj);
obj->printRefs();
}
mPendingStrongDerefs.push(obj);
break;
case BR_INCREFS:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
refs->incWeak(mProcess.get());
mOut.writeInt32(BC_INCREFS_DONE);
mOut.writePointer((uintptr_t)refs);
mOut.writePointer((uintptr_t)obj);
break;
case BR_DECREFS:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
// NOTE: This assertion is not valid, because the object may no
// longer exist (thus the (BBinder*)cast above resulting in a different
// memory address).
//ALOG_ASSERT(refs->refBase() == obj,
// "BR_DECREFS: object %p does not match cookie %p (expected %p)",
// refs, obj, refs->refBase());
mPendingWeakDerefs.push(refs);
break;
case BR_ATTEMPT_ACQUIRE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
{
const bool success = refs->attemptIncStrong(mProcess.get());
ALOG_ASSERT(success && refs->refBase() == obj,
"BR_ATTEMPT_ACQUIRE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
mOut.writeInt32(BC_ACQUIRE_RESULT);
mOut.writeInt32((int32_t)success);
}
break;
case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(result == NO_ERROR,
"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
int curPrio = getpriority(PRIO_PROCESS, mMyThreadId);
if (gDisableBackgroundScheduling) {
if (curPrio > ANDROID_PRIORITY_NORMAL) {
// We have inherited a reduced priority from the caller, but do not
// want to run in that state in this process. The driver set our
// priority already (though not our scheduling class), so bounce
// it back to the default before invoking the transaction.
setpriority(PRIO_PROCESS, mMyThreadId, ANDROID_PRIORITY_NORMAL);
}
} else {
if (curPrio >= ANDROID_PRIORITY_BACKGROUND) {
// We want to use the inherited priority from the caller.
// Ensure this thread is in the background scheduling class,
// since the driver won't modify scheduling classes for us.
// The scheduling group is reset to default by the caller
// once this method returns after the transaction is complete.
set_sched_policy(mMyThreadId, SP_BACKGROUND);
}
}
//ALOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);
Parcel reply;
status_t error;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BR_TRANSACTION thr " << (void*)pthread_self()
<< " / obj " << tr.target.ptr << " / code "
<< TypeCode(tr.code) << ": " << indent << buffer
<< dedent << endl
<< "Data addr = "
<< reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
<< ", offsets addr="
<< reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
}
if (tr.target.ptr) {
sp<BBinder> b((BBinder*)tr.cookie);
error = b->transact(tr.code, buffer, &reply, tr.flags);
[// error = b->transact(tr.code, buffer, &reply, tr.flags)
This calls BBinder's transact function to dispatch the request to the Binder local object. BBinder::transact is defined as follows:
status_t BBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
data.setDataPosition(0);
status_t err = NO_ERROR;
switch (code) {
case PING_TRANSACTION:
reply->writeInt32(pingBinder());
break;
default:
err = onTransact(code, data, reply, flags);
[// err = onTransact(code, data, reply, flags)
For any code other than the built-ins, transact calls the virtual onTransact, which dispatches to the user-defined implementation in the concrete Service class.
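A minimal sketch of what such a user-defined onTransact typically looks like (IHello, BnHello, sayHello and HELLO_SAY are hypothetical names used only for illustration; CHECK_INTERFACE is the standard helper macro from IInterface.h):
status_t BnHello::onTransact(uint32_t code, const Parcel& data,
                             Parcel* reply, uint32_t flags)
{
    switch (code) {
    case HELLO_SAY:
        CHECK_INTERFACE(IHello, data, reply);  // verify the RPC header written by the proxy
        sayHello();                            // the Service's business logic
        return NO_ERROR;
    default:
        // Unknown codes fall back to BBinder's default handling.
        return BBinder::onTransact(code, data, reply, flags);
    }
}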
]// err = onTransact(code, data, reply, flags)
break;
}
if (reply != NULL) {
reply->setDataPosition(0);
}
return err;
}
]// error = b->transact(tr.code, buffer, &reply, tr.flags)
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
//ALOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
// mCallingPid, origPid, origUid);
if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
} else {
LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
mCallingPid = origPid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
<< tr.target.ptr << ": " << indent << reply << dedent << endl;
}
}
break;
case BR_DEAD_BINDER:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->sendObituary();
mOut.writeInt32(BC_DEAD_BINDER_DONE);
mOut.writePointer((uintptr_t)proxy);
} break;
case BR_CLEAR_DEATH_NOTIFICATION_DONE:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->getWeakRefs()->decWeak(proxy);
} break;
case BR_FINISHED:
result = TIMED_OUT;
break;
case BR_NOOP:
break;
case BR_SPAWN_LOOPER:
mProcess->spawnPooledThread(false);
break;
default:
printf("*** BAD COMMAND %d received from Binder driver\n", cmd);
result = UNKNOWN_ERROR;
break;
}
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
]// result = executeCommand(cmd)
// After executing the command, ensure that the thread is returned to the
// foreground cgroup before rejoining the pool. The driver takes care of
// restoring the priority, but doesn't do anything with cgroups so we
// need to take care of that here in userspace. Note that we do make
// sure to go in the foreground after executing a transaction, but
// there are other callbacks into user code that could have changed
// our group so we want to make absolutely sure it is put back.
set_sched_policy(mMyThreadId, SP_FOREGROUND);
}
return result;
}
]// result = getAndExecuteCommand()
if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting", mProcess->mDriverFD, result);
abort();
}
// Let this thread exit the thread pool if it is no longer
// needed and it is not the main process thread.
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
[// while (result != -ECONNREFUSED && result != -EBADF)
This loop runs indefinitely, handling the interaction with the Binder driver; it only terminates when the driver returns -ECONNREFUSED or -EBADF (or, for non-main threads, when a TIMED_OUT result lets the thread leave the pool).
WARNING: IPCThreadState::joinThreadPool effectively contains an infinite loop.
]// while (result != -ECONNREFUSED && result != -EBADF)
LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%p\n",
(void*)pthread_self(), getpid(), (void*)result);
mOut.writeInt32(BC_EXIT_LOOPER);
talkWithDriver(false);
[// talkWithDriver(false)
If execution reaches this point, the thread is leaving the thread pool for one reason or another, so a BC_EXIT_LOOPER command is sent to unregister it from the Binder driver.
]// talkWithDriver(false)
Back in the Binder driver, the remaining cases of binder_ioctl (continuing the kernel-side analysis from the beginning of this article):
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
proc->pid, thread->pid);
binder_free_thread(proc, thread);
thread = NULL;
break;
case BINDER_VERSION:
if (size != sizeof(struct binder_version)) {
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
default:
ret = -EINVAL;
goto err;
}
ret = 0;
err:
if (thread)
thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
[// if (thread)
This line clears the thread's BINDER_LOOPER_STATE_NEED_RETURN flag, so that the next time the thread enters the Binder driver, the driver may dispatch inter-process communication requests to it.
]// if (thread)
mutex_unlock(&binder_lock);
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
return ret;
}
The analysis above focused on the Binder driver. In the Frameworks layer, Android wraps the individual driver operations into a Binder library, so that a process can perform inter-process communication simply by calling the library's interfaces.
Next, we analyze the implementation of the Binder library.
In the Binder library, Service components and Client components are described by the template classes BnInterface and BpInterface respectively; the former is called the Binder local object and the latter the Binder proxy object.
BnInterface and BpInterface are the user-space counterparts of the Binder entity objects and Binder reference objects maintained inside the driver.
Both templates are defined in frameworks/base/include/binder/IInterface.h, as follows:
template<typename INTERFACE>
class BnInterface : public INTERFACE, public BBinder
{
public:
virtual sp<IInterface> queryLocalInterface(const String16& _descriptor);
virtual const String16& getInterfaceDescriptor() const;
protected:
virtual IBinder* onAsBinder();
};
template<typename INTERFACE>
class BpInterface : public INTERFACE, public BpRefBase
{
public:
BpInterface(const sp<IBinder>& remote);
[// BpInterface(const sp<IBinder>& remote)
BpInterface's constructor is implemented as follows:
template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
: BpRefBase(remote)
{
}
]// BpInterface(const sp<IBinder>& remote)
protected:
virtual IBinder* onAsBinder();
};
Both BnInterface and BpInterface are templates whose parameter INTERFACE must derive from the IInterface interface, which is also defined in frameworks/base/include/binder/IInterface.h.
Notice that BnInterface and BpInterface themselves inherit from INTERFACE as well — a clever design that fuses the business interface and the IPC machinery into a single class.
IInterface is defined as follows:
class IInterface : public virtual RefBase
[// class IInterface : public virtual RefBase
IInterface inherits from RefBase, which means its objects can be managed through smart pointers (sp<>/wp<>).
]// class IInterface : public virtual RefBase
{
public:
IInterface();
sp<IBinder> asBinder();
sp<const IBinder> asBinder() const;
protected:
virtual ~IInterface();
virtual IBinder* onAsBinder() = 0;
};
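As a concrete illustration of how these templates are meant to be used, here is a minimal sketch (IHello, BnHello, BpHello, HELLO_SAY and the descriptor string are hypothetical names invented for this example; DECLARE_META_INTERFACE and IMPLEMENT_META_INTERFACE are the standard helper macros from IInterface.h):
class IHello : public IInterface
{
public:
    DECLARE_META_INTERFACE(Hello);  // declares asInterface(), the descriptor, etc.
    enum { HELLO_SAY = IBinder::FIRST_CALL_TRANSACTION };
    virtual void sayHello() = 0;    // the business method
};

// Service side: the Binder local object. Its onTransact switches on the code
// and calls sayHello(), as sketched back in the executeCommand discussion.
class BnHello : public BnInterface<IHello>
{
public:
    virtual status_t onTransact(uint32_t code, const Parcel& data,
                                Parcel* reply, uint32_t flags = 0);
};

// Client side: the Binder proxy object.
class BpHello : public BpInterface<IHello>
{
public:
    BpHello(const sp<IBinder>& impl) : BpInterface<IHello>(impl) {}
    virtual void sayHello()
    {
        Parcel data, reply;
        data.writeInterfaceToken(IHello::getInterfaceDescriptor());
        remote()->transact(HELLO_SAY, data, &reply);  // goes through the driver
    }
};

// In the .cpp file:
IMPLEMENT_META_INTERFACE(Hello, "demo.IHello");
Note how BnHello thereby inherits both IHello (through BnInterface) and BBinder, while BpHello inherits IHello and BpRefBase — exactly the hierarchies analyzed next.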
We first analyze the design of the Binder local object side, BnInterface. As its declaration shows, besides inheriting from IInterface it also inherits from BBinder, the class that provides the abstract IPC interface for Binder local objects.
BBinder is defined in frameworks/native/include/binder/Binder.h as follows:
class BBinder : public IBinder
[// class BBinder : public IBinder
BBinder has two important member functions: transact and onTransact.
When a Binder proxy object sends an inter-process communication request to a Binder local object through the Binder driver, the receiving thread calls the local object's transact member to handle the request.
onTransact is implemented by BBinder's subclasses, i.e. the Binder local object classes; it dispatches business-specific IPC requests, which are ultimately handled by the Service component classes derived from them.
]// class BBinder : public IBinder
{
public:
BBinder();
virtual const String16& getInterfaceDescriptor() const;
virtual bool isBinderAlive() const;
virtual status_t pingBinder();
virtual status_t dump(int fd, const Vector<String16>& args);
virtual status_t transact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
virtual status_t linkToDeath(const sp<DeathRecipient>& recipient,
void* cookie = NULL,
uint32_t flags = 0);
virtual status_t unlinkToDeath( const wp<DeathRecipient>& recipient,
void* cookie = NULL,
uint32_t flags = 0,
wp<DeathRecipient>* outRecipient = NULL);
virtual void attachObject( const void* objectID,
void* object,
void* cleanupCookie,
object_cleanup_func func);
virtual void* findObject(const void* objectID) const;
virtual void detachObject(const void* objectID);
virtual BBinder* localBinder();
protected:
virtual ~BBinder();
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
private:
BBinder(const BBinder& o);
BBinder& operator=(const BBinder& o);
class Extras;
Extras* mExtras;
void* mReserved0;
};
IBinder is defined in frameworks/native/include/binder/IBinder.h as follows:
class IBinder : public virtual RefBase
{
public:
enum {
FIRST_CALL_TRANSACTION = 0x00000001,
LAST_CALL_TRANSACTION = 0x00ffffff,
PING_TRANSACTION = B_PACK_CHARS('_','P','N','G'),
DUMP_TRANSACTION = B_PACK_CHARS('_','D','M','P'),
INTERFACE_TRANSACTION = B_PACK_CHARS('_', 'N', 'T', 'F'),
SYSPROPS_TRANSACTION = B_PACK_CHARS('_', 'S', 'P', 'R'),
// Corresponds to TF_ONE_WAY -- an asynchronous call.
FLAG_ONEWAY = 0x00000001
};
IBinder();
/**
* Check if this IBinder implements the interface named by
* @a descriptor. If it does, the base pointer to it is returned,
* which you can safely static_cast<> to the concrete C++ interface.
*/
virtual sp<IInterface> queryLocalInterface(const String16& descriptor);
/**
* Return the canonical name of the interface provided by this IBinder
* object.
*/
virtual const String16& getInterfaceDescriptor() const = 0;
virtual bool isBinderAlive() const = 0;
virtual status_t pingBinder() = 0;
virtual status_t dump(int fd, const Vector<String16>& args) = 0;
virtual status_t transact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0) = 0;
/**
* This method allows you to add data that is transported through
* IPC along with your IBinder pointer. When implementing a Binder
* object, override it to write your desired data in to @a outData.
* You can then call getConstantData() on your IBinder to retrieve
* that data, from any process. You MUST return the number of bytes
* written in to the parcel (including padding).
*/
class DeathRecipient : public virtual RefBase
{
public:
virtual void binderDied(const wp<IBinder>& who) = 0;
};
/**
* Register the @a recipient for a notification if this binder
* goes away. If this binder object unexpectedly goes away
* (typically because its hosting process has been killed),
* then DeathRecipient::binderDied() will be called with a reference
* to this.
*
* The @a cookie is optional -- if non-NULL, it should be a
* memory address that you own (that is, you know it is unique).
*
* @note You will only receive death notifications for remote binders,
* as local binders by definition can't die without you dying as well.
* Trying to use this function on a local binder will result in an
* INVALID_OPERATION code being returned and nothing happening.
*
* @note This link always holds a weak reference to its recipient.
*
* @note You will only receive a weak reference to the dead
* binder. You should not try to promote this to a strong reference.
* (Nor should you need to, as there is nothing useful you can
* directly do with it now that it has passed on.)
*/
virtual status_t linkToDeath(const sp<DeathRecipient>& recipient,
void* cookie = NULL,
uint32_t flags = 0) = 0;
/**
* Remove a previously registered death notification.
* The @a recipient will no longer be called if this object
* dies. The @a cookie is optional. If non-NULL, you can
* supply a NULL @a recipient, and the recipient previously
* added with that cookie will be unlinked.
*/
virtual status_t unlinkToDeath( const wp<DeathRecipient>& recipient,
void* cookie = NULL,
uint32_t flags = 0,
wp<DeathRecipient>* outRecipient = NULL) = 0;
virtual bool checkSubclass(const void* subclassID) const;
typedef void (*object_cleanup_func)(const void* id, void* obj, void* cleanupCookie);
virtual void attachObject( const void* objectID,
void* object,
void* cleanupCookie,
object_cleanup_func func) = 0;
virtual void* findObject(const void* objectID) const = 0;
virtual void detachObject(const void* objectID) = 0;
virtual BBinder* localBinder();
virtual BpBinder* remoteBinder();
protected:
virtual ~IBinder();
private:
};
Next we turn to the design of BpInterface. Besides IInterface, it also inherits from BpRefBase, the class that provides the abstract IPC interface for Binder proxy objects.
BpRefBase is declared in frameworks/native/include/binder/Binder.h:
class BpRefBase : public virtual RefBase
{
protected:
BpRefBase(const sp<IBinder>& o);
[// BpRefBase(const sp<IBinder>& o)
BpRefBase's constructor is as follows:
BpRefBase::BpRefBase(const sp<IBinder>& o)
: mRemote(o.get()), mRefs(NULL), mState(0)
[// mRemote(o.get())
The mRemote member is initialized here.
]// mRemote(o.get())
{
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
if (mRemote) {
mRemote->incStrong(this); // Removed on first IncStrong().
mRefs = mRemote->createWeak(this); // Held for our entire lifetime.
}
}
]// BpRefBase(const sp<IBinder>& o)
virtual ~BpRefBase();
virtual void onFirstRef();
virtual void onLastStrongRef(const void* id);
virtual bool onIncStrongAttempted(uint32_t flags, const void* id);
inline IBinder* remote() { return mRemote; }
inline IBinder* remote() const { return mRemote; }
private:
BpRefBase(const BpRefBase& o);
BpRefBase& operator=(const BpRefBase& o);
IBinder* const mRemote;
[// IBinder* const mRemote
mRemote points to a Binder proxy object — concretely, an object of type BpBinder.
]// IBinder* const mRemote
RefBase::weakref_type* mRefs;
volatile int32_t mState;
};
As its declaration shows, BpRefBase inherits from RefBase.
To summarize the inheritance hierarchies of BnInterface and BpInterface:
BnInterface -> (INTERFACE -> IInterface -> RefBase, BBinder -> IBinder -> RefBase)
BpInterface -> (INTERFACE -> IInterface -> RefBase, BpRefBase -> RefBase)
Moving on: the Binder library also provides two other important classes, IPCThreadState and ProcessState, whose job is to interact with the Binder driver.
When implementing a Service component and its Client, the usual flow is to first register the Service component with the Service Manager and then call:
ProcessState::self()->startThreadPool()
IPCThreadState::self()->joinThreadPool();
These two calls start the Binder thread pool, signalling that the process is ready to handle Binder IPC requests (see the sketch below).
We will analyze the Frameworks Binder library starting from these two lines.
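A minimal sketch of a native Service process wired up this way (the service name "demo.hello" and the HelloService class are hypothetical; defaultServiceManager() and addService() come from IServiceManager.h):
int main(int argc, char** argv)
{
    // Register the Service component with the Service Manager.
    defaultServiceManager()->addService(String16("demo.hello"), new HelloService());
    // Spawn the first pooled Binder thread ...
    ProcessState::self()->startThreadPool();
    // ... and let the main thread join the pool too, blocking here.
    IPCThreadState::self()->joinThreadPool();
    return 0;
}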
我们先看一下ProcessState类的声明:
class ProcessState : public virtual RefBase
{
public:
static sp<ProcessState> self();
void setContextObject(const sp<IBinder>& object);
sp<IBinder> getContextObject(const sp<IBinder>& caller);
void setContextObject(const sp<IBinder>& object,
const String16& name);
sp<IBinder> getContextObject(const String16& name,
const sp<IBinder>& caller);
void startThreadPool();
typedef bool (*context_check_func)(const String16& name,
const sp<IBinder>& caller,
void* userData);
bool isContextManager(void) const;
bool becomeContextManager(
context_check_func checkFunc,
void* userData);
sp<IBinder> getStrongProxyForHandle(int32_t handle);
wp<IBinder> getWeakProxyForHandle(int32_t handle);
void expungeHandle(int32_t handle, IBinder* binder);
void setArgs(int argc, const char* const argv[]);
int getArgC() const;
const char* const* getArgV() const;
void setArgV0(const char* txt);
void spawnPooledThread(bool isMain);
status_t setThreadPoolMaxThreadCount(size_t maxThreads);
private:
friend class IPCThreadState;
ProcessState();
~ProcessState();
ProcessState(const ProcessState& o);
ProcessState& operator=(const ProcessState& o);
struct handle_entry {
IBinder* binder;
RefBase::weakref_type* refs;
};
handle_entry* lookupHandleLocked(int32_t handle);
int mDriverFD;
[// int mDriverFD
mDriverFD holds the file descriptor obtained from opening /dev/binder; all subsequent interaction with the Binder driver goes through this descriptor.
]// int mDriverFD
void* mVMStart;
mutable Mutex mLock; // protects everything below.
Vector<handle_entry>mHandleToObject;
bool mManagesContexts;
context_check_func mBinderContextCheckFunc;
void* mBinderContextUserData;
KeyedVector<String16, sp<IBinder> >
mContexts;
String8 mRootDir;
bool mThreadPoolStarted;
[// bool mThreadPoolStarted
mThreadPoolStarted indicates whether the Binder thread pool has already been started.
]// bool mThreadPoolStarted
volatile int32_t mThreadPoolSeq;
};
We begin analyzing the design and implementation of ProcessState from its self() function:
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState;
[// gProcess = new ProcessState
Here ProcessState's constructor is invoked to build a ProcessState object, which is assigned to the global variable gProcess. The constructor is implemented as follows:
ProcessState::ProcessState()
: mDriverFD(open_driver())
[// mDriverFD(open_driver())
mDriverFD is initialized by calling open_driver, whose job is to open the /dev/binder device file and perform some initial setup:
static int open_driver()
{
int fd = open("/dev/binder", O_RDWR);
[// int fd = open("/dev/binder", O_RDWR)
Open the /dev/binder device file.
]// int fd = open("/dev/binder", O_RDWR)
if (fd >= 0) {
fcntl(fd, F_SETFD, FD_CLOEXEC);
int vers;
status_t result = ioctl(fd, BINDER_VERSION, &vers);
[// status_t result = ioctl(fd, BINDER_VERSION, &vers)
First an ioctl call retrieves the protocol version of the Binder driver.
This traps into the driver's binder_ioctl function.
]// status_t result = ioctl(fd, BINDER_VERSION, &vers)
if (result == -1) {
ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
close(fd);
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
ALOGE("Binder driver protocol does not match user space protocol!");
close(fd);
fd = -1;
}
[// if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION)
This block verifies that the kernel's Binder protocol version matches the one the library was built against.
]// if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION)
size_t maxThreads = 15;
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
[// result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads)
This sets the size of the Binder thread pool; as the code shows, the default maximum is 15 threads.
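A process that wants a different limit can, as a sketch, use the setThreadPoolMaxThreadCount member declared in the ProcessState header above, which issues this same BINDER_SET_MAX_THREADS ioctl with the new value; it has to run before the pool starts:
ProcessState::self()->setThreadPoolMaxThreadCount(4);  // re-issues BINDER_SET_MAX_THREADS
ProcessState::self()->startThreadPool();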
]// result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads)
if (result == -1) {
ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
}
} else {
ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
}
return fd;
}
]// mDriverFD(open_driver())
, mVMStart(MAP_FAILED)
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
if (mDriverFD >= 0) {
// XXX Ideally, there should be a specific define for whether we
// have mmap (or whether we could possibly have the kernel module
// availabla).
#if !defined(HAVE_WIN32_IPC)
// mmap the binder, providing a chunk of virtual address space to receive transactions.
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
[// mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0)
This mmap call maps BINDER_VM_SIZE bytes of driver-managed buffer space read-only into the process address space, to be used for receiving transaction data. (BINDER_VM_SIZE is defined in ProcessState.cpp as ((1*1024*1024) - (4096*2)), i.e. just under 1MB.)
]// mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0)
if (mVMStart == MAP_FAILED) {
// *sigh*
ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
close(mDriverFD);
mDriverFD = -1;
}
#else
mDriverFD = -1;
#endif
}
LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}
]// gProcess = new ProcessState
return gProcess;
}
[// sp<ProcessState> ProcessState::self()
The implementation of self() shows that ProcessState is a process-wide singleton, with construction guarded by gProcessMutex.
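The globals behind the singleton live in the Binder library's Static.cpp (quoted from memory, so treat this as a sketch):
Mutex gProcessMutex;       // guards the lazy construction in self()
sp<ProcessState> gProcess; // the process-wide instance handed out by self()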
]// sp<ProcessState> ProcessState::self()
Having analyzed ProcessState::self(), we now look at the implementation of ProcessState::startThreadPool():
void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
spawnPooledThread(true);
[// spawnPooledThread(true)
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
int32_t s = android_atomic_add(1, &mThreadPoolSeq);
char buf[16];
snprintf(buf, sizeof(buf), "Binder_%X", s);
ALOGV("Spawning new pooled thread, name=%s\n", buf);
[// ALOGV("Spawning new pooled thread, name=%s\n", buf)
Build the new thread's name, e.g. "Binder_1".
]// ALOGV("Spawning new pooled thread, name=%s\n", buf)
sp<Thread> t = new PoolThread(isMain);
[// sp<Thread> t = new PoolThread(isMain)
A PoolThread object is constructed here. The PoolThread class is defined as follows:
class PoolThread : public Thread
{
public:
PoolThread(bool isMain)
: mIsMain(isMain)
[// PoolThread(bool isMain)
PoolThread's constructor is trivial — it only records isMain.
The constructor of its parent class Thread is covered with the Thread class below.
]// PoolThread(bool isMain)
{
}
protected:
virtual bool threadLoop()
[// virtual bool threadLoop()
threadLoop is the key function: it is what the newly created thread actually executes.
]// virtual bool threadLoop()
{
IPCThreadState::self()->joinThreadPool(mIsMain);
[// IPCThreadState::self()->joinThreadPool(mIsMain)
This calls IPCThreadState::joinThreadPool (analyzed in the IPCThreadState section below). Note that threadLoop returns false, so it runs only once; the thread spends its whole life blocked inside joinThreadPool.
]// IPCThreadState::self()->joinThreadPool(mIsMain)
return false;
}
const bool mIsMain;
};
PoolThread in turn inherits from the Thread class, which is declared as follows:
class Thread : virtual public RefBase
{
public:
// Create a Thread object, but doesn't create or start the associated
// thread. See the run() method.
Thread(bool canCallJava = true);
[// Thread(bool canCallJava = true)
Thread's constructor is as follows:
Thread::Thread(bool canCallJava)
: mCanCallJava(canCallJava),
[// mCanCallJava(canCallJava)
As shown, mCanCallJava defaults to true.
]// mCanCallJava(canCallJava)
mThread(thread_id_t(-1)),
mLock("Thread::mLock"),
mStatus(NO_ERROR),
mExitPending(false), mRunning(false)
#ifdef HAVE_ANDROID_OS
, mTid(-1)
#endif
{
}
]// Thread(bool canCallJava = true)
virtual ~Thread();
// Start the thread in threadLoop() which needs to be implemented.
virtual status_t run( const char* name = 0,
int32_t priority = PRIORITY_DEFAULT,
size_t stack = 0);
// Ask this object's thread to exit. This function is asynchronous, when the
// function returns the thread might still be running. Of course, this
// function can be called from a different thread.
virtual void requestExit();
// Good place to do one-time initializations
virtual status_t readyToRun();
// Call requestExit() and wait until this object's thread exits.
// BE VERY CAREFUL of deadlocks. In particular, it would be silly to call
// this function from this object's thread. Will return WOULD_BLOCK in
// that case.
status_t requestExitAndWait();
// Wait until this object's thread exits. Returns immediately if not yet running.
// Do not call from this object's thread; will return WOULD_BLOCK in that case.
status_t join();
#ifdef HAVE_ANDROID_OS
// Return the thread's kernel ID, same as the thread itself calling gettid() or
// androidGetTid(), or -1 if the thread is not running.
pid_t getTid() const;
#endif
protected:
// exitPending() returns true if requestExit() has been called.
bool exitPending() const;
private:
// Derived class must implement threadLoop(). The thread starts its life
// here. There are two ways of using the Thread object:
// 1) loop: if threadLoop() returns true, it will be called again if
// requestExit() wasn't called.
// 2) once: if threadLoop() returns false, the thread will exit upon return.
virtual bool threadLoop() = 0;
private:
Thread& operator=(const Thread&);
static int _threadLoop(void* user);
const bool mCanCallJava;
// always hold mLock when reading or writing
thread_id_t mThread;
mutable Mutex mLock;
Condition mThreadExitedCondition;
status_t mStatus;
// note that all accesses of mExitPending and mRunning need to hold mLock
volatile bool mExitPending;
volatile bool mRunning;
sp<Thread> mHoldSelf;
#ifdef HAVE_ANDROID_OS
// legacy for debugging, not used by getTid() as it is set by the child thread
// and so is not initialized until the child reaches that point
pid_t mTid;
#endif
};
Through Thread, PoolThread ultimately inherits from RefBase, so PoolThread objects can be managed with smart pointers.
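As an aside, any Thread subclass is driven the same way PoolThread is; a hypothetical sketch:
class TickThread : public Thread
{
protected:
    virtual bool threadLoop()
    {
        // Do one unit of work per call; returning true means "call me again".
        return !exitPending();
    }
};

// usage:
sp<TickThread> t = new TickThread();
t->run("TickThread");  // spawns the thread, which then calls threadLoop repeatedly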
]// sp<Thread> t = new PoolThread(isMain)
t->run(buf);
[// t->run(buf)
After the PoolThread object is constructed, run is called to start the thread; run is inherited from the Thread class:
status_t Thread::run(const char* name, int32_t priority, size_t stack)
{
Mutex::Autolock _l(mLock);
if (mRunning) {
// thread already started
return INVALID_OPERATION;
}
// reset status and exitPending to their default value, so we can
// try again after an error happened (either below, or in readyToRun())
mStatus = NO_ERROR;
mExitPending = false;
mThread = thread_id_t(-1);
// hold a strong reference on ourself
mHoldSelf = this;
mRunning = true;
bool res;
if (mCanCallJava) {
res = createThreadEtc(_threadLoop, this, name, priority, stack, &mThread);
[// res = createThreadEtc(_threadLoop, this, name, priority, stack, &mThread)
This calls createThreadEtc, an inline helper declared in the utils threads header (utils/threads.h, later utils/AndroidThreads.h) that forwards to androidCreateThreadEtc: it creates a new native thread that begins execution in the entry function passed to it, here _threadLoop.
_threadLoop is defined in the Thread class:
int Thread::_threadLoop(void* user)
{
Thread* const self = static_cast<Thread*>(user);
sp<Thread> strong(self->mHoldSelf);
wp<Thread> weak(strong);
self->mHoldSelf.clear();
#ifdef HAVE_ANDROID_OS
// this is very useful for debugging with gdb
self->mTid = gettid();
#endif
bool first = true;
do {
bool result;
if (first) {
first = false;
self->mStatus = self->readyToRun();
[// self->mStatus = self->readyToRun()
On the first iteration, readyToRun is called. Its default implementation is:
status_t Thread::readyToRun()
{
return NO_ERROR;
}
readyToRun is normally overridden by subclasses to perform one-time initialization.
]// self->mStatus = self->readyToRun()
result = (self->mStatus == NO_ERROR);
if (result && !self->exitPending()) {
// Binder threads (and maybe others) rely on threadLoop
// running at least once after a successful ::readyToRun()
// (unless, of course, the thread has already been asked to exit
// at that point).
// This is because threads are essentially used like this:
// (new ThreadSubclass())->run();
// The caller therefore does not retain a strong reference to
// the thread and the thread would simply disappear after the
// successful ::readyToRun() call instead of entering the
// threadLoop at least once.
result = self->threadLoop();
[// result = self->threadLoop()
After readyToRun, threadLoop is invoked.
threadLoop is a virtual function; here it is PoolThread's threadLoop that actually runs.
]// result = self->threadLoop()
}
} else {
result = self->threadLoop();
}
// establish a scope for mLock
{
Mutex::Autolock _l(self->mLock);
if (result == false || self->mExitPending) {
self->mExitPending = true;
self->mRunning = false;
// clear thread ID so that requestExitAndWait() does not exit if
// called by a new thread using the same thread ID as this one.
self->mThread = thread_id_t(-1);
// note that interested observers blocked in requestExitAndWait are
// awoken by broadcast, but blocked on mLock until break exits scope
self->mThreadExitedCondition.broadcast();
break;
}
}
// Release our strong reference, to let a chance to the thread
// to die a peaceful death.
strong.clear();
// And immediately, re-acquire a strong reference for the next loop
strong = weak.promote();
} while(strong != 0);
[// while(strong != 0)
The created thread thus spins in this loop. Strictly speaking it is not unconditional: the loop breaks when threadLoop returns false (as PoolThread's does) or when an exit is requested; the thread only appears to run forever because joinThreadPool blocks inside threadLoop.
WARNING: Thread::_threadLoop can loop indefinitely for subclasses whose threadLoop keeps returning true.
]// while(strong != 0)
return 0;
}
]// res = createThreadEtc(_threadLoop, this, name, priority, stack, &mThread)
} else {
res = androidCreateRawThreadEtc(_threadLoop, this, name, priority, stack, &mThread);
}
if (res == false) {
mStatus = UNKNOWN_ERROR; // something happened!
mRunning = false;
mThread = thread_id_t(-1);
mHoldSelf.clear(); // "this" may have gone away after this.
return UNKNOWN_ERROR;
}
// Do not refer to mStatus here: The thread is already running (may, in fact
// already have exited with a valid mStatus result). The NO_ERROR indication
// here merely indicates successfully starting the thread and does not
// imply successful termination/execution.
return NO_ERROR;
// Exiting scope of mLock is a memory barrier and allows new thread to run
}
]// t->run(buf)
}
}
]// spawnPooledThread(true)
}
[// if (!mThreadPoolStarted)
If the pool has not been started yet, spawnPooledThread is called to start the Binder library thread that talks to the driver, and mThreadPoolStarted is set to true to record that the pool is up.
]// if (!mThreadPoolStarted)
}
With ProcessState covered, we move on to the design and implementation of IPCThreadState.
First, the declaration of the IPCThreadState class:
class IPCThreadState
{
public:
static IPCThreadState* self();
static IPCThreadState* selfOrNull(); // self(), but won't instantiate
sp<ProcessState> process();
status_t clearLastError();
pid_t getCallingPid() const;
uid_t getCallingUid() const;
void setStrictModePolicy(int32_t policy);
int32_t getStrictModePolicy() const;
void setLastTransactionBinderFlags(int32_t flags);
int32_t getLastTransactionBinderFlags() const;
int64_t clearCallingIdentity();
void restoreCallingIdentity(int64_t token);
int setupPolling(int* fd);
status_t handlePolledCommands();
void flushCommands();
void joinThreadPool(bool isMain = true);
// Stop the local process.
void stopProcess(bool immediate = true);
status_t transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags);
void incStrongHandle(int32_t handle);
void decStrongHandle(int32_t handle);
void incWeakHandle(int32_t handle);
void decWeakHandle(int32_t handle);
status_t attemptIncStrongHandle(int32_t handle);
static void expungeHandle(int32_t handle, IBinder* binder);
status_t requestDeathNotification( int32_t handle,
BpBinder* proxy);
status_t clearDeathNotification( int32_t handle,
BpBinder* proxy);
static void shutdown();
// Call this to disable switching threads to background scheduling when
// receiving incoming IPC calls. This is specifically here for the
// Android system process, since it expects to have background apps calling
// in to it but doesn't want to acquire locks in its services while in
// the background.
static void disableBackgroundScheduling(bool disable);
private:
IPCThreadState();
~IPCThreadState();
status_t sendReply(const Parcel& reply, uint32_t flags);
status_t waitForResponse(Parcel *reply,
status_t *acquireResult=NULL);
status_t talkWithDriver(bool doReceive=true);
status_t writeTransactionData(int32_t cmd,
uint32_t binderFlags,
int32_t handle,
uint32_t code,
const Parcel& data,
status_t* statusBuffer);
status_t getAndExecuteCommand();
status_t executeCommand(int32_t command);
void processPendingDerefs();
void clearCaller();
static void threadDestructor(void *st);
static void freeBuffer(Parcel* parcel,
const uint8_t* data, size_t dataSize,
const binder_size_t* objects, size_t objectsSize,
void* cookie);
const sp<ProcessState> mProcess;
[// const sp<ProcessState> mProcess
mProcess holds a strong reference to the process-wide ProcessState object, i.e. the host process state.
]// const sp<ProcessState> mProcess
const pid_t mMyThreadId;
Vector<BBinder*> mPendingStrongDerefs;
Vector<RefBase::weakref_type*> mPendingWeakDerefs;
Parcel mIn;
Parcel mOut;
[// Parcel mOut
mIn and mOut buffer the data exchanged with the driver: mOut holds commands to be sent, mIn holds commands received back.
Both are of type Parcel, declared as follows:
class Parcel
{
public:
class ReadableBlob;
class WritableBlob;
Parcel();
~Parcel();
const uint8_t* data() const;
size_t dataSize() const;
size_t dataAvail() const;
size_t dataPosition() const;
size_t dataCapacity() const;
status_t setDataSize(size_t size);
void setDataPosition(size_t pos) const;
status_t setDataCapacity(size_t size);
status_t setData(const uint8_t* buffer, size_t len);
status_t appendFrom(const Parcel *parcel,
size_t start, size_t len);
bool pushAllowFds(bool allowFds);
void restoreAllowFds(bool lastValue);
bool hasFileDescriptors() const;
// Writes the RPC header.
status_t writeInterfaceToken(const String16& interface);
// Parses the RPC header, returning true if the interface name
// in the header matches the expected interface from the caller.
//
// Additionally, enforceInterface does part of the work of
// propagating the StrictMode policy mask, populating the current
// IPCThreadState, which as an optimization may optionally be
// passed in.
bool enforceInterface(const String16& interface,
IPCThreadState* threadState = NULL) const;
bool checkInterface(IBinder*) const;
void freeData();
const size_t* objects() const;
size_t objectsCount() const;
status_t errorCheck() const;
void setError(status_t err);
status_t write(const void* data, size_t len);
void* writeInplace(size_t len);
status_t writeUnpadded(const void* data, size_t len);
status_t writeInt32(int32_t val);
status_t writeInt64(int64_t val);
status_t writeFloat(float val);
status_t writeDouble(double val);
status_t writeIntPtr(intptr_t val);
status_t writeCString(const char* str);
status_t writeString8(const String8& str);
status_t writeString16(const String16& str);
status_t writeString16(const char16_t* str, size_t len);
status_t writeStrongBinder(const sp<IBinder>& val);
status_t writeWeakBinder(const wp<IBinder>& val);
status_t write(const Flattenable& val);
template<typename T>
status_t write(const LightFlattenable<T>& val);
// Place a native_handle into the parcel (the native_handle's file-
// descriptors are dup'ed, so it is safe to delete the native_handle
// when this function returns).
// Doesn't take ownership of the native_handle.
status_t writeNativeHandle(const native_handle* handle);
// Place a file descriptor into the parcel. The given fd must remain
// valid for the lifetime of the parcel.
// The Parcel does not take ownership of the given fd unless you ask it to.
status_t writeFileDescriptor(int fd, bool takeOwnership = false);
// Place a file descriptor into the parcel. A dup of the fd is made, which
// will be closed once the parcel is destroyed.
status_t writeDupFileDescriptor(int fd);
// Writes a blob to the parcel.
// If the blob is small, then it is stored in-place, otherwise it is
// transferred by way of an anonymous shared memory region.
// The caller should call release() on the blob after writing its contents.
status_t writeBlob(size_t len, WritableBlob* outBlob);
status_t writeObject(const flat_binder_object& val, bool nullMetaData);
// Like Parcel.java's writeNoException(). Just writes a zero int32.
// Currently the native implementation doesn't do any of the StrictMode
// stack gathering and serialization that the Java implementation does.
status_t writeNoException();
void remove(size_t start, size_t amt);
status_t read(void* outData, size_t len) const;
const void* readInplace(size_t len) const;
int32_t readInt32() const;
status_t readInt32(int32_t *pArg) const;
int64_t readInt64() const;
status_t readInt64(int64_t *pArg) const;
float readFloat() const;
status_t readFloat(float *pArg) const;
double readDouble() const;
status_t readDouble(double *pArg) const;
intptr_t readIntPtr() const;
status_t readIntPtr(intptr_t *pArg) const;
const char* readCString() const;
String8 readString8() const;
String16 readString16() const;
const char16_t* readString16Inplace(size_t* outLen) const;
sp<IBinder> readStrongBinder() const;
wp<IBinder> readWeakBinder() const;
status_t read(Flattenable& val) const;
template<typename T>
status_t read(LightFlattenable<T>& val) const;
// Like Parcel.java's readExceptionCode(). Reads the first int32
// off of a Parcel's header, returning 0 or the negative error
// code on exceptions, but also deals with skipping over rich
// response headers. Callers should use this to read & parse the
// response headers rather than doing it by hand.
int32_t readExceptionCode() const;
// Retrieve native_handle from the parcel. This returns a copy of the
// parcel's native_handle (the caller takes ownership). The caller
// must free the native_handle with native_handle_close() and
// native_handle_delete().
native_handle* readNativeHandle() const;
// Retrieve a file descriptor from the parcel. This returns the raw fd
// in the parcel, which you do not own -- use dup() to get your own copy.
int readFileDescriptor() const;
// Reads a blob from the parcel.
// The caller should call release() on the blob after reading its contents.
status_t readBlob(size_t len, ReadableBlob* outBlob) const;
const flat_binder_object* readObject(bool nullMetaData) const;
// Explicitly close all file descriptors in the parcel.
void closeFileDescriptors();
typedef void (*release_func)(Parcel* parcel,
const uint8_t* data, size_t dataSize,
const size_t* objects, size_t objectsSize,
void* cookie);
const uint8_t* ipcData() const;
size_t ipcDataSize() const;
const size_t* ipcObjects() const;
size_t ipcObjectsCount() const;
void ipcSetDataReference(const uint8_t* data, size_t dataSize,
const size_t* objects, size_t objectsCount,
release_func relFunc, void* relCookie);
void print(TextOutput& to, uint32_t flags = 0) const;
private:
Parcel(const Parcel& o);
Parcel& operator=(const Parcel& o);
status_t finishWrite(size_t len);
void releaseObjects();
void acquireObjects();
status_t growData(size_t len);
status_t restartWrite(size_t desired);
status_t continueWrite(size_t desired);
void freeDataNoInit();
void initState();
void scanForFds() const;
template<class T>
status_t readAligned(T *pArg) const;
template<class T> T readAligned() const;
template<class T>
status_t writeAligned(T val);
status_t mError;
uint8_t* mData;
[// uint8_t* mData
mData points to the byte array holding the parcel's data.
]// uint8_t* mData
size_t mDataSize;
[// size_t mDataSize
mDataSize is the size of the data currently stored.
]// size_t mDataSize
size_t mDataCapacity;
[// size_t mDataCapacity
mDataCapacity is the capacity of the allocated buffer.
]// size_t mDataCapacity
mutable size_t mDataPos;
[// mutable size_t mDataPos
mDataPos is the current read/write cursor into the buffer (not necessarily the end of the data).
]// mutable size_t mDataPos
size_t* mObjects;
size_t mObjectsSize;
size_t mObjectsCapacity;
mutable size_t mNextObjectHint;
mutable bool mFdsKnown;
mutable bool mHasFds;
bool mAllowFds;
release_func mOwner;
void* mOwnerCookie;
class Blob {
public:
Blob();
~Blob();
void release();
inline size_t size() const { return mSize; }
protected:
void init(bool mapped, void* data, size_t size);
void clear();
bool mMapped;
void* mData;
size_t mSize;
};
public:
class ReadableBlob : public Blob {
friend class Parcel;
public:
inline const void* data() const { return mData; }
};
class WritableBlob : public Blob {
friend class Parcel;
public:
inline void* data() { return mData; }
};
};
Here we just look at Parcel's constructor:
Parcel::Parcel()
{
LOG_ALLOC("Parcel %p: constructing", this);
initState();
[// initState()
The constructor calls initState to reset all fields:
void Parcel::initState()
{
LOG_ALLOC("Parcel %p: initState", this);
mError = NO_ERROR;
mData = 0;
mDataSize = 0;
mDataCapacity = 0;
mDataPos = 0;
ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
mObjects = NULL;
mObjectsSize = 0;
mObjectsCapacity = 0;
mNextObjectHint = 0;
mHasFds = false;
mFdsKnown = true;
mAllowFds = true;
mOwner = NULL;
}
]// initState()
}
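A quick usage sketch of the Parcel API declared above (only calls shown in the class declaration are used):
Parcel p;
p.writeInt32(42);                     // mDataPos and mDataSize advance by 4
p.writeString16(String16("hello"));
p.setDataPosition(0);                 // rewind the cursor before reading back
int32_t n = p.readInt32();            // 42
String16 s = p.readString16();        // "hello"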
]// Parcel mOut
status_t mLastError;
pid_t mCallingPid;
uid_t mCallingUid;
int32_t mStrictModePolicy;
int32_t mLastTransactionBinderFlags;
};
As with ProcessState, we start the analysis from IPCThreadState's self() function:
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
return new IPCThreadState;
[// return new IPCThreadState
If the calling thread has no instance yet, IPCThreadState's constructor is invoked to create one.
The constructor is as follows:
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),
[// mProcess(ProcessState::self())
The host process's ProcessState is recorded here.
]// mProcess(ProcessState::self())
mMyThreadId(gettid()),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0)
{
pthread_setspecific(gTLS, this);
[// pthread_setspecific(gTLS, this)
The newly created IPCThreadState object is stored in thread-local storage under the key gTLS.
]// pthread_setspecific(gTLS, this)
clearCaller();
mIn.setDataCapacity(256);
[// mIn.setDataCapacity(256)
The constructor then calls setDataCapacity on mIn (and below on mOut) to reserve 256 bytes. setDataCapacity is defined as follows:
status_t Parcel::setDataCapacity(size_t size)
{
if (size > mDataCapacity) return continueWrite(size);
[// if (size > mDataCapacity) return continueWrite(size)
mDataCapacity holds the current buffer capacity, which is initially 0.
So this calls continueWrite to (re)allocate the buffer:
status_t Parcel::continueWrite(size_t desired)
{
// If shrinking, first adjust for any objects that appear
// after the new data size.
size_t objectsSize = mObjectsSize;
[// size_t objectsSize = mObjectsSize
The member mObjectsSize is the size of the array of Binder-object offsets embedded in the data buffer; it is declared as:
size_t mObjectsSize;
]// size_t objectsSize = mObjectsSize
if (desired < mDataSize) {
if (desired == 0) {
objectsSize = 0;
} else {
while (objectsSize > 0) {
if (mObjects[objectsSize-1] < desired)
break;
objectsSize--;
}
}
}
[// if (desired < mDataSize)
Here desired is the target size and mDataSize the current data size.
desired < mDataSize means the buffer is being shrunk.
In that case, if desired is 0, objectsSize is simply set to 0; otherwise flat Binder objects whose offsets lie beyond the new size are dropped from the end of the object array.
]// if (desired < mDataSize)
if (mOwner) {
[// if (mOwner)
The member mOwner is a function pointer, defined as:
release_func mOwner;
release_func is declared as:
typedef void (*release_func)(Parcel* parcel,
const uint8_t* data, size_t dataSize,
const binder_size_t* objects, size_t objectsSize,
void* cookie);
]// if (mOwner)
// If the size is going to zero, just release the owner's data.
if (desired == 0) {
freeData();
return NO_ERROR;
}
// If there is a different owner, we need to take
// posession.
uint8_t* data = (uint8_t*)malloc(desired);
if (!data) {
mError = NO_MEMORY;
return NO_MEMORY;
}
binder_size_t* objects = NULL;
if (objectsSize) {
objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
if (!objects) {
free(data);
mError = NO_MEMORY;
return NO_MEMORY;
}
// Little hack to only acquire references on objects
// we will be keeping.
size_t oldObjectsSize = mObjectsSize;
mObjectsSize = objectsSize;
acquireObjects();
mObjectsSize = oldObjectsSize;
}
if (mData) {
memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
}
if (objects && mObjects) {
memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
}
//ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
mOwner = NULL;
LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
gParcelGlobalAllocCount++;
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mObjects = objects;
mDataSize = (mDataSize < desired) ? mDataSize : desired;
ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
mDataCapacity = desired;
mObjectsSize = mObjectsCapacity = objectsSize;
mNextObjectHint = 0;
} else if (mData) {
if (objectsSize < mObjectsSize) {
// Need to release refs on any objects we are dropping.
const sp<ProcessState> proc(ProcessState::self());
for (size_t i=objectsSize; i<mObjectsSize; i++) {
const flat_binder_object* flat
= reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
if (flat->type == BINDER_TYPE_FD) {
// will need to rescan because we may have lopped off the only FDs
mFdsKnown = false;
}
release_object(proc, *flat, this);
}
binder_size_t* objects =
(binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
if (objects) {
mObjects = objects;
}
mObjectsSize = objectsSize;
mNextObjectHint = 0;
}
// We own the data, so we can just do a realloc().
if (desired > mDataCapacity) {
uint8_t* data = (uint8_t*)realloc(mData, desired);
if (data) {
LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
desired);
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
gParcelGlobalAllocSize -= mDataCapacity;
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mDataCapacity = desired;
} else if (desired > mDataCapacity) {
mError = NO_MEMORY;
return NO_MEMORY;
}
} else {
if (mDataSize > desired) {
mDataSize = desired;
ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
}
if (mDataPos > desired) {
mDataPos = desired;
ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
}
}
} else {
// This is the first data. Easy!
uint8_t* data = (uint8_t*)malloc(desired);
[// uint8_t* data = (uint8_t*)malloc(desired)
Allocate a block of memory of desired bytes.
]// uint8_t* data = (uint8_t*)malloc(desired)
if (!data) {
mError = NO_MEMORY;
return NO_MEMORY;
}
if(!(mDataCapacity == 0 && mObjects == NULL
&& mObjectsCapacity == 0)) {
ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
}
LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
gParcelGlobalAllocCount++;
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mDataSize = mDataPos = 0;
ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
mDataCapacity = desired;
}
return NO_ERROR;
}
]// if (size > mDataCapacity) return continueWrite(size)
return NO_ERROR;
}
]// mIn.setDataCapacity(256)
mOut.setDataCapacity(256);
}
]// return new IPCThreadState
}
if (gShutdown) return NULL;
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
if (pthread_key_create(&gTLS, threadDestructor) != 0) {
pthread_mutex_unlock(&gTLSMutex);
return NULL;
}
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
goto restart;
}
[// IPCThreadState* IPCThreadState::self()
IPCThreadState::self() shows that IPCThreadState is a per-thread singleton: each thread gets its own instance, stored in pthread thread-local storage.
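A small sketch of what "per-thread" means in practice:
IPCThreadState* a = IPCThreadState::self();  // called on thread A
// ... meanwhile, on thread B:
IPCThreadState* b = IPCThreadState::self();  // a different object: a != b
// Each thread therefore has its own mIn/mOut buffers and its own calling identity.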
]// IPCThreadState* IPCThreadState::self()
Once self() has returned the calling thread's IPCThreadState object, joinThreadPool can be called.
joinThreadPool is defined as follows:
void IPCThreadState::joinThreadPool(bool isMain)
[// void IPCThreadState::joinThreadPool(bool isMain)
joinThreadPool takes one parameter, isMain, which defaults to true.
]// void IPCThreadState::joinThreadPool(bool isMain)
{
LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
[// mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER)
The first step is to queue a looper registration command: BC_ENTER_LOOPER for the main thread (and threads that join explicitly), or BC_REGISTER_LOOPER for pool threads spawned at the driver's request via BR_SPAWN_LOOPER. This makes the thread available to the Binder thread pool.
Let us look at Parcel's writeInt32 implementation:
status_t Parcel::writeInt32(int32_t val)
{
return writeAligned(val);
[// return writeAligned(val)
writeInt32 delegates to writeAligned, which is defined as:
template<class T> status_t Parcel::writeAligned(T val) {
COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T));
if ((mDataPos+sizeof(val)) <= mDataCapacity) {
restart_write:
*reinterpret_cast<T*>(mData+mDataPos) = val;
[// *reinterpret_cast<T*>(mData+mDataPos) = val
Write the value at the current write cursor in the buffer.
]// *reinterpret_cast<T*>(mData+mDataPos) = val
return finishWrite(sizeof(val));
[// return finishWrite(sizeof(val))
After the write, finishWrite is called with the number of bytes just written to update the bookkeeping:
status_t Parcel::finishWrite(size_t len)
{
//printf("Finish write of %d\n", len);
mDataPos += len;
[// mDataPos += len
Advance the write cursor.
]// mDataPos += len
ALOGV("finishWrite Setting data pos of %p to %d\n", this, mDataPos);
if (mDataPos > mDataSize) {
mDataSize = mDataPos;
[// mDataSize = mDataPos
Grow the recorded data size if the cursor moved past it.
]// mDataSize = mDataPos
ALOGV("finishWrite Setting data size of %p to %d\n", this, mDataSize);
}
//printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
return NO_ERROR;
}
]// return finishWrite(sizeof(val))
}
[// if ((mDataPos+sizeof(val)) <= mDataCapacity)
(mDataPos+sizeof(val)) <= mDataCapacity means the remaining capacity is enough for the write.
]// if ((mDataPos+sizeof(val)) <= mDataCapacity)
status_t err = growData(sizeof(val));
[// status_t err = growData(sizeof(val))
If (mDataPos+sizeof(val)) > mDataCapacity, the current capacity cannot hold the new value, so growData is called to enlarge the buffer.
growData is defined as follows:
status_t Parcel::growData(size_t len)
{
size_t newSize = ((mDataSize+len)*3)/2;
[// size_t newSize = ((mDataSize+len)*3)/2
Compute the new capacity: roughly 1.5 times the current data size plus the new item.
]// size_t newSize = ((mDataSize+len)*3)/2
return (newSize <= mDataSize)
? (status_t) NO_MEMORY
: continueWrite(newSize);
[// return (newSize <= mDataSize)
continueWrite is then called to reallocate to the new capacity.
]// return (newSize <= mDataSize)
}
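A worked example: appending a 4-byte int32 to a parcel whose mDataSize is currently 256 calls continueWrite(((256+4)*3)/2), i.e. continueWrite(390), so the capacity grows by roughly 1.5x at a time instead of one element at a time.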
]// status_t err = growData(sizeof(val))
if (err == NO_ERROR) goto restart_write;
return err;
}
]// return writeAligned(val)
}
]// mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER)
// This thread may have been spawned by a thread that was in the background
// scheduling group, so first we will make sure it is in the foreground
// one to avoid performing an initial transaction in the background.
set_sched_policy(mMyThreadId, SP_FOREGROUND);
status_t result;
do {
processPendingDerefs();
[// processPendingDerefs()
Each iteration first calls processPendingDerefs, which performs the deferred strong/weak reference decrements queued by BR_RELEASE and BR_DECREFS. They are only processed once mIn has been fully consumed, so objects are not released while commands that may still refer to them are pending.
void IPCThreadState::processPendingDerefs()
{
if (mIn.dataPosition() >= mIn.dataSize()) {
size_t numPending = mPendingWeakDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
RefBase::weakref_type* refs = mPendingWeakDerefs[i];
refs->decWeak(mProcess.get());
}
mPendingWeakDerefs.clear();
}
numPending = mPendingStrongDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
BBinder* obj = mPendingStrongDerefs[i];
obj->decStrong(mProcess.get());
}
mPendingStrongDerefs.clear();
}
}
}
]// processPendingDerefs()
// now get the next command to be processed, waiting if necessary
result = getAndExecuteCommand();
[// result = getAndExecuteCommand()
getAndExecuteCommand is called here to exchange data with the Binder driver and execute one returned command:
status_t IPCThreadState::getAndExecuteCommand()
{
status_t result;
int32_t cmd;
result = talkWithDriver();
[// result = talkWithDriver()
talkWithDriver exchanges data with the Binder driver: it writes the commands buffered in mOut and reads returned commands into mIn.
status_t IPCThreadState::talkWithDriver(bool doReceive)
[// status_t IPCThreadState::talkWithDriver(bool doReceive)
talkWithDriver's parameter doReceive defaults to true.
]// status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
[// if (mProcess->mDriverFD <= 0)
mDriverFD is the file descriptor of the opened /dev/binder; all interaction with the driver goes through it.
]// if (mProcess->mDriverFD <= 0)
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
[// const bool needRead = mIn.dataPosition() >= mIn.dataSize()
Parcel's dataPosition is defined as:
size_t Parcel::dataPosition() const
{
return mDataPos;
}
Parcel's dataSize is defined as:
size_t Parcel::dataSize() const
{
return (mDataSize > mDataPos ? mDataSize : mDataPos);
}
dataSize thus returns the true size of the data.
needRead indicates whether more data should be read from the Binder driver.
If mIn.dataPosition() < mIn.dataSize(), mIn still contains unread commands, so no new read is needed yet.
]// const bool needRead = mIn.dataPosition() >= mIn.dataSize()
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
[// const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0
This decides whether to hand output data to the driver: data is written when doReceive is false or needRead is true.
doReceive defaults to true, but some paths call talkWithDriver(false) to flush mOut without waiting for a reply — for example the BC_EXIT_LOOPER send at the end of joinThreadPool.
If needRead is false, mIn still has unread commands, so nothing more is sent to the driver for now (see the summary below).
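Putting the two flags together (derived from the code above and below):
doReceive=true,  needRead=true  -> write mOut and block to read replies: the normal getAndExecuteCommand path
doReceive=true,  needRead=false -> mIn still holds unread commands: write nothing, read nothing
doReceive=false                 -> write-only flush, as in the talkWithDriver(false) call that sends BC_EXIT_LOOPER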
]// const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
[// bwr.write_buffer = (uintptr_t)mOut.data()
The output size and buffer are prepared here.
]// bwr.write_buffer = (uintptr_t)mOut.data()
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
[// if (doReceive && needRead)
This prepares the read side: only when doReceive and needRead are both true is the read buffer pointed at mIn's storage, with size equal to mIn's capacity.
]// if (doReceive && needRead)
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
if (outAvail != 0) {
alog << "Sending commands to driver: " << indent;
const void* cmds = (const void*)bwr.write_buffer;
const void* end = ((const uint8_t*)cmds)+bwr.write_size;
alog << HexDump(cmds, bwr.write_size) << endl;
while (cmds < end) cmds = printCommand(alog, cmds);
alog << dedent;
}
alog << "Size of receive buffer: " << bwr.read_size
<< ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
[// if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR
If there is nothing to write and nothing to read, return immediately.
]// if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR
bwr.write_consumed = 0;
bwr.read_consumed = 0;
[// bwr.read_consumed = 0
These two lines reset the read and write consumed counters before talking to the driver.
]// bwr.read_consumed = 0
status_t err;
do {
IF_LOG_COMMANDS() {
alog << "About to read/write, write size = " << mOut.dataSize() << endl;
}
#if defined(HAVE_ANDROID_OS)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
[// if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
A single BINDER_WRITE_READ ioctl both sends the buffered commands to the driver and retrieves the commands the driver sends back.
]// if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
IF_LOG_COMMANDS() {
alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
}
} while (err == -EINTR);
IF_LOG_COMMANDS() {
alog << "Our err: " << (void*)(intptr_t)err << ", write consumed: "
<< bwr.write_consumed << " (of " << mOut.dataSize()
<< "), read consumed: " << bwr.read_consumed << endl;
}
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
[// if (bwr.write_consumed > 0)
bwr.write_consumed > 0 means the driver consumed (some of) the commands the library sent.
]// if (bwr.write_consumed > 0)
if (bwr.write_consumed < mOut.dataSize())
[// if (bwr.write_consumed < mOut.dataSize())
bwr.write_consumed < mOut.dataSize() means the driver did not consume everything that was sent.
]// if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
[// mOut.remove(0, bwr.write_consumed)
remove is called to drop the consumed portion of the data; its implementation is:
void Parcel::remove(size_t start, size_t amt)
{
LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
}
Surprisingly, remove() is not implemented at all — it aborts via LOG_ALWAYS_FATAL. Apparently partial consumption does not arise in practice, so this branch is not expected to run.
]// mOut.remove(0, bwr.write_consumed)
else
mOut.setDataSize(0);
[// mOut.setDataSize(0)
Calling setDataSize(0) effectively discards all of the data.
setDataSize is implemented as follows:
status_t Parcel::setDataSize(size_t size)
{
status_t err;
err = continueWrite(size);
if (err == NO_ERROR) {
mDataSize = size;
ALOGV("setDataSize Setting data size of %p to %d\n", this, mDataSize);
}
return err;
}
]// mOut.setDataSize(0)
}
if (bwr.read_consumed > 0) {
[// if (bwr.read_consumed > 0)
bwr.read_consumed > 0 means the driver returned data, so the commands it sent back must now be read.
]// if (bwr.read_consumed > 0)
mIn.setDataSize(bwr.read_consumed);
[// mIn.setDataSize(bwr.read_consumed)
mIn's data size is set to read_consumed, so that it reflects exactly the bytes the driver wrote back.
]// mIn.setDataSize(bwr.read_consumed)
mIn.setDataPosition(0);
[// mIn.setDataPosition(0)
setDataPosition(0) rewinds the cursor to the start of the data.
]// mIn.setDataPosition(0)
}
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
alog << "Remaining data size: " << mOut.dataSize() << endl;
alog << "Received commands from driver: " << indent;
const void* cmds = mIn.data();
const void* end = mIn.data() + mIn.dataSize();
alog << HexDump(cmds, mIn.dataSize()) << endl;
while (cmds < end) cmds = printReturnCommand(alog, cmds);
alog << dedent;
}
return NO_ERROR;
}
return err;
}
]// result = talkWithDriver()
if (result >= NO_ERROR) {
size_t IN = mIn.dataAvail();
if (IN < sizeof(int32_t)) return result;
cmd = mIn.readInt32();
[// cmd = mIn.readInt32()
Retrieve a command returned by the Binder driver.
Note that only one command is consumed per getAndExecuteCommand call: the rest stay in mIn, and since needRead is then false, subsequent talkWithDriver calls return without issuing an ioctl until mIn has been drained.
readInt32 is defined as follows:
int32_t Parcel::readInt32() const
{
return readAligned<int32_t>();
[// return readAligned<int32_t>()
template<class T> T Parcel::readAligned() const {
T result;
if (readAligned(&result) != NO_ERROR) {
[// if (readAligned(&result) != NO_ERROR)
The two-argument readAligned overload is defined as follows:
template<class T> status_t Parcel::readAligned(T *pArg) const {
COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T));
if ((mDataPos+sizeof(T)) <= mDataSize) {
const void* data = mData+mDataPos;
mDataPos += sizeof(T);
*pArg = *reinterpret_cast<const T*>(data);
return NO_ERROR;
} else {
return NOT_ENOUGH_DATA;
}
}
As the definition shows, readAligned is straightforward: it copies the value out of the buffer and advances mDataPos to the next aligned datum.
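The COMPILE_TIME_ASSERT above relies on PAD_SIZE; in the version of Parcel.cpp this walkthrough follows, it is the usual round-up-to-4-bytes macro:
#define PAD_SIZE(s) (((s)+3)&~3)   /* round s up to a multiple of 4 */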
]// if (readAligned(&result) != NO_ERROR)
result = 0;
}
return result;
}
]// return readAligned<int32_t>()
}
]// cmd = mIn.readInt32()
IF_LOG_COMMANDS() {
alog << "Processing top-level Command: "
<< getReturnString(cmd) << endl;
}
result = executeCommand(cmd);
[// result = executeCommand(cmd)
executeCommand is called here to act on the command just read back from the Binder driver.
Its implementation is as follows:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
case BR_ERROR:
result = mIn.readInt32();
break;
case BR_OK:
break;
case BR_ACQUIRE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
ALOG_ASSERT(refs->refBase() == obj,
"BR_ACQUIRE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
obj->incStrong(mProcess.get());
IF_LOG_REMOTEREFS() {
LOG_REMOTEREFS("BR_ACQUIRE from driver on %p", obj);
obj->printRefs();
}
mOut.writeInt32(BC_ACQUIRE_DONE);
mOut.writePointer((uintptr_t)refs);
mOut.writePointer((uintptr_t)obj);
break;
case BR_RELEASE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
ALOG_ASSERT(refs->refBase() == obj,
"BR_RELEASE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
IF_LOG_REMOTEREFS() {
LOG_REMOTEREFS("BR_RELEASE from driver on %p", obj);
obj->printRefs();
}
mPendingStrongDerefs.push(obj);
break;
case BR_INCREFS:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
refs->incWeak(mProcess.get());
mOut.writeInt32(BC_INCREFS_DONE);
mOut.writePointer((uintptr_t)refs);
mOut.writePointer((uintptr_t)obj);
break;
case BR_DECREFS:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
// NOTE: This assertion is not valid, because the object may no
// longer exist (thus the (BBinder*)cast above resulting in a different
// memory address).
//ALOG_ASSERT(refs->refBase() == obj,
// "BR_DECREFS: object %p does not match cookie %p (expected %p)",
// refs, obj, refs->refBase());
mPendingWeakDerefs.push(refs);
break;
case BR_ATTEMPT_ACQUIRE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
{
const bool success = refs->attemptIncStrong(mProcess.get());
ALOG_ASSERT(success && refs->refBase() == obj,
"BR_ATTEMPT_ACQUIRE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
mOut.writeInt32(BC_ACQUIRE_RESULT);
mOut.writeInt32((int32_t)success);
}
break;
case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(result == NO_ERROR,
"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
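[// buffer.ipcSetDataReference(...)
ipcSetDataReference makes the Parcel wrap the transaction data in place: the bytes live in the mmap'ed Binder buffer rather than being copied, and freeBuffer is registered as the release callback, which later returns the buffer to the driver with a BC_FREE_BUFFER command.
]// buffer.ipcSetDataReference(...)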
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
int curPrio = getpriority(PRIO_PROCESS, mMyThreadId);
if (gDisableBackgroundScheduling) {
if (curPrio > ANDROID_PRIORITY_NORMAL) {
// We have inherited a reduced priority from the caller, but do not
// want to run in that state in this process. The driver set our
// priority already (though not our scheduling class), so bounce
// it back to the default before invoking the transaction.
setpriority(PRIO_PROCESS, mMyThreadId, ANDROID_PRIORITY_NORMAL);
}
} else {
if (curPrio >= ANDROID_PRIORITY_BACKGROUND) {
// We want to use the inherited priority from the caller.
// Ensure this thread is in the background scheduling class,
// since the driver won't modify scheduling classes for us.
// The scheduling group is reset to default by the caller
// once this method returns after the transaction is complete.
set_sched_policy(mMyThreadId, SP_BACKGROUND);
}
}
//ALOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);
Parcel reply;
status_t error;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BR_TRANSACTION thr " << (void*)pthread_self()
<< " / obj " << tr.target.ptr << " / code "
<< TypeCode(tr.code) << ": " << indent << buffer
<< dedent << endl
<< "Data addr = "
<< reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
<< ", offsets addr="
<< reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
}
if (tr.target.ptr) {
sp<BBinder> b((BBinder*)tr.cookie);
error = b->transact(tr.code, buffer, &reply, tr.flags);
[// error = b->transact(tr.code, buffer, &reply, tr.flags)
For a local Binder object, BBinder's transact function is invoked to dispatch the transaction. It is defined as follows:
status_t BBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
data.setDataPosition(0);
status_t err = NO_ERROR;
switch (code) {
case PING_TRANSACTION:
reply->writeInt32(pingBinder());
break;
default:
err = onTransact(code, data, reply, flags);
[// err = onTransact(code, data, reply, flags)
onTransact dispatches to the service's user-defined override, where the actual request handling happens.
]// err = onTransact(code, data, reply, flags)
break;
}
if (reply != NULL) {
reply->setDataPosition(0);
}
return err;
}
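To make the dispatch concrete, here is a sketch of a service-side override; the class name MyAddService and the ADD_TRANSACTION code are invented for illustration:
#include <binder/Binder.h>   // BBinder
#include <binder/Parcel.h>
using namespace android;

class MyAddService : public BBinder {
protected:
    virtual status_t onTransact(uint32_t code, const Parcel& data,
                                Parcel* reply, uint32_t flags = 0)
    {
        enum { ADD_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION };
        switch (code) {
        case ADD_TRANSACTION: {
            const int32_t a = data.readInt32();   // unpack the two arguments
            const int32_t b = data.readInt32();
            if (reply != NULL) reply->writeInt32(a + b);  // pack the result
            return NO_ERROR;
        }
        default:
            // Let the base class handle PING_TRANSACTION etc.
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};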
]// error = b->transact(tr.code, buffer, &reply, tr.flags)
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
//ALOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
// mCallingPid, origPid, origUid);
if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
} else {
LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
mCallingPid = origPid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
<< tr.target.ptr << ": " << indent << reply << dedent << endl;
}
}
break;
case BR_DEAD_BINDER:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->sendObituary();
mOut.writeInt32(BC_DEAD_BINDER_DONE);
mOut.writePointer((uintptr_t)proxy);
} break;
case BR_CLEAR_DEATH_NOTIFICATION_DONE:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->getWeakRefs()->decWeak(proxy);
} break;
case BR_FINISHED:
result = TIMED_OUT;
break;
case BR_NOOP:
break;
case BR_SPAWN_LOOPER:
mProcess->spawnPooledThread(false);
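[// mProcess->spawnPooledThread(false)
BR_SPAWN_LOOPER is the driver asking this process to add another thread to its Binder thread pool; spawnPooledThread(false) starts a new pool thread, which announces itself to the driver with BC_REGISTER_LOOPER (see the note at the end of joinThreadPool below).
]// mProcess->spawnPooledThread(false)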
break;
default:
printf("*** BAD COMMAND %d received from Binder driver\n", cmd);
result = UNKNOWN_ERROR;
break;
}
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
]// result = executeCommand(cmd)
// After executing the command, ensure that the thread is returned to the
// foreground cgroup before rejoining the pool. The driver takes care of
// restoring the priority, but doesn't do anything with cgroups so we
// need to take care of that here in userspace. Note that we do make
// sure to go in the foreground after executing a transaction, but
// there are other callbacks into user code that could have changed
// our group so we want to make absolutely sure it is put back.
set_sched_policy(mMyThreadId, SP_FOREGROUND);
}
return result;
}
]// result = getAndExecuteCommand()
if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting", mProcess->mDriverFD, result);
abort();
}
// Let this thread exit the thread pool if it is no longer
// needed and it is not the main process thread.
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
[// while (result != -ECONNREFUSED && result != -EBADF)
This do/while loop keeps interacting with the Binder driver until a fatal condition occurs.
WARNING: IPCThreadState::joinThreadPool is effectively an infinite loop; it only exits on -ECONNREFUSED or -EBADF (the driver is gone), or, for threads that are not the main thread, on TIMED_OUT.
]// while (result != -ECONNREFUSED && result != -EBADF)
LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%p\n",
(void*)pthread_self(), getpid(), (void*)result);
mOut.writeInt32(BC_EXIT_LOOPER);
talkWithDriver(false);
[// talkWithDriver(false)
Reaching this point means the thread is leaving the loop for one of the reasons above, so it reports its departure to the driver with a BC_EXIT_LOOPER command; talkWithDriver(false) flushes it without waiting to read anything back (doReceive = false).
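For symmetry, at the top of joinThreadPool (above the section quoted here) the thread first announces itself to the driver, roughly:
// Abridged from the opening of IPCThreadState::joinThreadPool():
// the main thread registers with BC_ENTER_LOOPER, driver-requested
// pool threads with BC_REGISTER_LOOPER.
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);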
]// talkWithDriver(false)