/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
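
/*
 * To make the ordering above concrete, a minimal illustrative sketch of
 * taking all three locks for one proc/node pair (hypothetical sequence,
 * not a helper in this driver; real code paths take only the locks they
 * need, but always in this order):
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	// ... touch refs, node fields, todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * Releasing in the reverse order keeps the hierarchy intact.
 */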

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
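
/*
 * Usage sketch (illustrative): output from both macros is gated by the
 * debug_mask module parameter above; binder_user_error() additionally
 * trips the test-only stop_on_user_error latch:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", current->pid);
 *	binder_user_error("%d: bad handle\n", current->pid);
 */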

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
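
/*
 * The smp_wmb() above pairs with a read barrier on the consumer side.
 * A minimal sketch of how a reader can detect a torn (in-progress)
 * entry, assuming the convention that debug_id_done is written last,
 * after the entry is filled in:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();	// read debug_id_done before the other fields
 *	// ... read/print fields; done == 0 means the entry may still
 *	// be in flight and its fields incomplete ...
 */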

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
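
/*
 * Work items are embedded in their owning objects (as in struct
 * binder_error above); a consumer pops a struct binder_work off a list
 * and recovers the container by switching on the type. Illustrative
 * sketch:
 *
 *	struct binder_work *w = binder_dequeue_work_head_ilocked(list);
 *
 *	switch (w->type) {
 *	case BINDER_WORK_RETURN_ERROR: {
 *		struct binder_error *e =
 *			container_of(w, struct binder_error, work);
 *		// ... report e->cmd to userspace ...
 *		break;
 *	}
 *	default:
 *		break;
 *	}
 */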

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
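
/*
 * Illustrative sketch of type-independent use: copy the header out of a
 * transaction buffer first, then interpret the union member selected by
 * hdr.type (object types come from uapi linux/android/binder.h):
 *
 *	struct binder_object object;
 *
 *	// ... copy at least sizeof(object.hdr) from the buffer ...
 *	switch (object.hdr.type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *		// use object.fbo
 *		break;
 *	case BINDER_TYPE_FD:
 *		// use object.fdo
 *		break;
 *	// ... BINDER_TYPE_PTR -> object.bbo,
 *	//     BINDER_TYPE_FDA -> object.fdao ...
 *	}
 */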

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
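
/*
 * Choosing between the two enqueue flavors (illustrative): node ref
 * work queued from binder_inc_node_nilocked() uses the deferred variant,
 * so a thread already returning to userspace picks it up on the way out
 * rather than being woken solely for it; work the thread must handle
 * promptly uses the non-deferred variant, which sets process_todo:
 *
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 */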

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to the dequeued binder_work, or NULL if the list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
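
/*
 * The lookup returns with a tmp_ref held; callers pair it with
 * binder_put_node() once they are done with the node (illustrative):
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		// ... use node; it cannot be freed while tmp_refs > 0 ...
 *		binder_put_node(node);
 *	}
 */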

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
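
/*
 * Note the allocate-outside-the-lock pattern above: the zeroed node is
 * allocated first, the rb tree is checked under the inner lock, and the
 * allocation is freed if another thread won the insertion race. The
 * same pattern is used for refs in binder_inc_ref_for_node() below.
 */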

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						   struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list
 * to print nodes).
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

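	/*
	 * Descriptor assignment: desc 0 is reserved for the context
	 * manager's node; any other ref gets the lowest unused desc at
	 * or above 1, found by walking refs_by_desc in ascending order.
	 * For example, with existing descs {1, 2, 4} a new ref gets 3.
	 */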
	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:		ref to be incremented
 * @strong:		if true, strong increment, else weak
 * @target_list:	list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
1820
1821/**
1822 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1823 * @thread: thread to decrement
1824 *
1825 * A thread needs to be kept alive while being used to create or
1826 * handle a transaction. binder_get_txn_from() is used to safely
1827 * extract t->from from a binder_transaction and keep the thread
1828 * indicated by t->from from being freed. When done with that
1829 * binder_thread, this function is called to decrement the
1830 * tmp_ref and free if appropriate (thread has been released
1831 * and no transaction being processed by the driver)
1832 */
1833static void binder_thread_dec_tmpref(struct binder_thread *thread)
1834{
	/*
	 * The atomic alone protects the counter while it cannot reach
	 * zero or thread->is_dead is false; the final check that allows
	 * freeing is done under the inner lock below.
	 */
1839 binder_inner_proc_lock(thread->proc);
1840 atomic_dec(&thread->tmp_ref);
1841 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1842 binder_inner_proc_unlock(thread->proc);
1843 binder_free_thread(thread);
1844 return;
1845 }
1846 binder_inner_proc_unlock(thread->proc);
1847}
1848
1849/**
1850 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1851 * @proc: proc to decrement
1852 *
1853 * A binder_proc needs to be kept alive while being used to create or
1854 * handle a transaction. proc->tmp_ref is incremented when
1855 * creating a new transaction or the binder_proc is currently in-use
1856 * by threads that are being released. When done with the binder_proc,
1857 * this function is called to decrement the counter and free the
1858 * proc if appropriate (proc has been released, all threads have
 * been released and it is not currently in use to process a transaction).
1860 */
1861static void binder_proc_dec_tmpref(struct binder_proc *proc)
1862{
1863 binder_inner_proc_lock(proc);
1864 proc->tmp_ref--;
1865 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1866 !proc->tmp_ref) {
1867 binder_inner_proc_unlock(proc);
1868 binder_free_proc(proc);
1869 return;
1870 }
1871 binder_inner_proc_unlock(proc);
1872}
1873
1874/**
1875 * binder_get_txn_from() - safely extract the "from" thread in transaction
1876 * @t: binder transaction for t->from
1877 *
1878 * Atomically return the "from" thread and increment the tmp_ref
1879 * count for the thread to ensure it stays alive until
1880 * binder_thread_dec_tmpref() is called.
1881 *
1882 * Return: the value of t->from
1883 */
1884static struct binder_thread *binder_get_txn_from(
1885 struct binder_transaction *t)
1886{
1887 struct binder_thread *from;
1888
1889 spin_lock(&t->lock);
1890 from = t->from;
1891 if (from)
1892 atomic_inc(&from->tmp_ref);
1893 spin_unlock(&t->lock);
1894 return from;
1895}
1896
1897/**
1898 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1899 * @t: binder transaction for t->from
1900 *
1901 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1902 * to guarantee that the thread cannot be released while operating on it.
1903 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
1905 *
1906 * Return: the value of t->from
1907 */
1908static struct binder_thread *binder_get_txn_from_and_acq_inner(
1909 struct binder_transaction *t)
1910 __acquires(&t->from->proc->inner_lock)
1911{
1912 struct binder_thread *from;
1913
1914 from = binder_get_txn_from(t);
1915 if (!from) {
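		/* annotation for sparse */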
1916 __acquire(&from->proc->inner_lock);
1917 return NULL;
1918 }
1919 binder_inner_proc_lock(from->proc);
1920 if (t->from) {
1921 BUG_ON(from != t->from);
1922 return from;
1923 }
1924 binder_inner_proc_unlock(from->proc);
1925 __acquire(&from->proc->inner_lock);
1926 binder_thread_dec_tmpref(from);
1927 return NULL;
1928}
1929
1930/**
1931 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction whose fd fixups should be freed
1933 *
1934 * If the transaction is being torn down prior to being
1935 * processed by the target process, free all of the
1936 * fd fixups and fput the file structs. It is safe to
1937 * call this function after the fixups have been
1938 * processed -- in that case, the list will be empty.
1939 */
1940static void binder_free_txn_fixups(struct binder_transaction *t)
1941{
1942 struct binder_txn_fd_fixup *fixup, *tmp;
1943
1944 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1945 fput(fixup->file);
1946 list_del(&fixup->fixup_entry);
1947 kfree(fixup);
1948 }
1949}
1950
1951static void binder_free_transaction(struct binder_transaction *t)
1952{
1953 if (t->buffer)
1954 t->buffer->transaction = NULL;
1955 binder_free_txn_fixups(t);
1956 kfree(t);
1957 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1958}
1959
1960static void binder_send_failed_reply(struct binder_transaction *t,
1961 uint32_t error_code)
1962{
1963 struct binder_thread *target_thread;
1964 struct binder_transaction *next;
1965
1966 BUG_ON(t->flags & TF_ONE_WAY);
1967 while (1) {
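		/*
		 * Deliver the error to the first live sender in the chain
		 * of nested transactions, walking t->from_parent and
		 * freeing each transaction whose sender has died.
		 */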
1968 target_thread = binder_get_txn_from_and_acq_inner(t);
1969 if (target_thread) {
1970 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1971 "send failed reply for transaction %d to %d:%d\n",
1972 t->debug_id,
1973 target_thread->proc->pid,
1974 target_thread->pid);
1975
1976 binder_pop_transaction_ilocked(target_thread, t);
1977 if (target_thread->reply_error.cmd == BR_OK) {
1978 target_thread->reply_error.cmd = error_code;
1979 binder_enqueue_thread_work_ilocked(
1980 target_thread,
1981 &target_thread->reply_error.work);
1982 wake_up_interruptible(&target_thread->wait);
1983 } else {
1984 /*
1985 * Cannot get here for normal operation, but
1986 * we can if multiple synchronous transactions
1987 * are sent without blocking for responses.
1988 * Just ignore the 2nd error in this case.
1989 */
1990 pr_warn("Unexpected reply error: %u\n",
1991 target_thread->reply_error.cmd);
1992 }
1993 binder_inner_proc_unlock(target_thread->proc);
1994 binder_thread_dec_tmpref(target_thread);
1995 binder_free_transaction(t);
1996 return;
1997 } else {
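			/* annotation for sparse */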
1998 __release(&target_thread->proc->inner_lock);
1999 }
2000 next = t->from_parent;
2001
2002 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2003 "send failed reply for transaction %d, target dead\n",
2004 t->debug_id);
2005
2006 binder_free_transaction(t);
2007 if (next == NULL) {
2008 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2009 "reply failed, no target thread at root\n");
2010 return;
2011 }
2012 t = next;
2013 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2014 "reply failed, no target thread -- retry %d\n",
2015 t->debug_id);
2016 }
2017}
2018
2019/**
2020 * binder_cleanup_transaction() - cleans up undelivered transaction
2021 * @t: transaction that needs to be cleaned up
2022 * @reason: reason the transaction wasn't delivered
2023 * @error_code: error to return to caller (if synchronous call)
2024 */
2025static void binder_cleanup_transaction(struct binder_transaction *t,
2026 const char *reason,
2027 uint32_t error_code)
2028{
2029 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2030 binder_send_failed_reply(t, error_code);
2031 } else {
2032 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2033 "undelivered transaction %d, %s\n",
2034 t->debug_id, reason);
2035 binder_free_transaction(t);
2036 }
2037}
2038
2039/**
2040 * binder_get_object() - gets object and checks for valid metadata
2041 * @proc: binder_proc owning the buffer
2042 * @buffer: binder_buffer that we're parsing.
2043 * @offset: offset in the @buffer at which to validate an object.
2044 * @object: struct binder_object to read into
2045 *
2046 * Return: If there's a valid metadata object at @offset in @buffer, the
2047 * size of that object. Otherwise, it returns zero. The object
2048 * is read into the struct binder_object pointed to by @object.
2049 */
2050static size_t binder_get_object(struct binder_proc *proc,
2051 struct binder_buffer *buffer,
2052 unsigned long offset,
2053 struct binder_object *object)
2054{
2055 size_t read_size;
2056 struct binder_object_header *hdr;
2057 size_t object_size = 0;
2058
2059 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
2061 return 0;
2062 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2063 offset, read_size);
2064
2065 /* Ok, now see if we read a complete object. */
2066 hdr = &object->hdr;
2067 switch (hdr->type) {
2068 case BINDER_TYPE_BINDER:
2069 case BINDER_TYPE_WEAK_BINDER:
2070 case BINDER_TYPE_HANDLE:
2071 case BINDER_TYPE_WEAK_HANDLE:
2072 object_size = sizeof(struct flat_binder_object);
2073 break;
2074 case BINDER_TYPE_FD:
2075 object_size = sizeof(struct binder_fd_object);
2076 break;
2077 case BINDER_TYPE_PTR:
2078 object_size = sizeof(struct binder_buffer_object);
2079 break;
2080 case BINDER_TYPE_FDA:
2081 object_size = sizeof(struct binder_fd_array_object);
2082 break;
2083 default:
2084 return 0;
2085 }
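	/*
	 * The second check below keeps the subtraction in the first one
	 * from underflowing when the buffer is smaller than the object.
	 */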
2086 if (offset <= buffer->data_size - object_size &&
2087 buffer->data_size >= object_size)
2088 return object_size;
2089 else
2090 return 0;
2091}
2092
2093/**
2094 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2095 * @proc: binder_proc owning the buffer
2096 * @b: binder_buffer containing the object
2097 * @object: struct binder_object to read into
2098 * @index: index in offset array at which the binder_buffer_object is
2099 * located
2100 * @start_offset: points to the start of the offset array
2101 * @object_offsetp: offset of @object read from @b
2102 * @num_valid: the number of valid offsets in the offset array
2103 *
2104 * Return: If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a valid
2106 * binder_buffer_object at the offset found in index @index
2107 * of the offset array, that object is returned. Otherwise,
2108 * %NULL is returned.
2109 * Note that the offset found in index @index itself is not
2110 * verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid offsets.
2112 * If @object_offsetp is non-NULL, then the offset within
2113 * @b is written to it.
2114 */
2115static struct binder_buffer_object *binder_validate_ptr(
2116 struct binder_proc *proc,
2117 struct binder_buffer *b,
2118 struct binder_object *object,
2119 binder_size_t index,
2120 binder_size_t start_offset,
2121 binder_size_t *object_offsetp,
2122 binder_size_t num_valid)
2123{
2124 size_t object_size;
2125 binder_size_t object_offset;
2126 unsigned long buffer_offset;
2127
2128 if (index >= num_valid)
2129 return NULL;
2130
2131 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2132 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2133 b, buffer_offset, sizeof(object_offset));
2134 object_size = binder_get_object(proc, b, object_offset, object);
2135 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2136 return NULL;
2137 if (object_offsetp)
2138 *object_offsetp = object_offset;
2139
2140 return &object->bbo;
2141}
2142
2143/**
2144 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2145 * @proc: binder_proc owning the buffer
2146 * @b: transaction buffer
2147 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset: offset to the binder_buffer_object in which the fixup occurs
2149 * @fixup_offset: start offset in @buffer to fix up
2150 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2151 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2152 *
 * Return:	%true if a fixup at @fixup_offset in buffer @b is
 *		allowed.
2155 *
2156 * For safety reasons, we only allow fixups inside a buffer to happen
2157 * at increasing offsets; additionally, we only allow fixup on the last
2158 * buffer object that was verified, or one of its parents.
2159 *
2160 * Example of what is allowed:
2161 *
2162 * A
2163 * B (parent = A, offset = 0)
2164 * C (parent = A, offset = 16)
2165 * D (parent = C, offset = 0)
2166 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2167 *
2168 * Examples of what is not allowed:
2169 *
2170 * Decreasing offsets within the same parent:
2171 * A
2172 * C (parent = A, offset = 16)
2173 * B (parent = A, offset = 0) // decreasing offset within A
2174 *
2175 * Referring to a parent that wasn't the last object or any of its parents:
2176 * A
2177 * B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *   D (parent = B, offset = 0) // B is neither C nor one of C's parents
2181 */
2182static bool binder_validate_fixup(struct binder_proc *proc,
2183 struct binder_buffer *b,
2184 binder_size_t objects_start_offset,
2185 binder_size_t buffer_obj_offset,
2186 binder_size_t fixup_offset,
2187 binder_size_t last_obj_offset,
2188 binder_size_t last_min_offset)
2189{
2190 if (!last_obj_offset) {
		/* Nothing has been verified yet, so no fixup is allowed */
2192 return false;
2193 }
2194
2195 while (last_obj_offset != buffer_obj_offset) {
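		/*
		 * Walk up the parent chain from the last verified object;
		 * a fixup is only allowed in that object or one of its
		 * ancestors.
		 */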
2196 unsigned long buffer_offset;
2197 struct binder_object last_object;
2198 struct binder_buffer_object *last_bbo;
2199 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2200 &last_object);
2201 if (object_size != sizeof(*last_bbo))
2202 return false;
2203
2204 last_bbo = &last_object.bbo;
2205 /*
2206 * Safe to retrieve the parent of last_obj, since it
2207 * was already previously verified by the driver.
2208 */
2209 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2210 return false;
2211 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
2214 binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
2215 b, buffer_offset,
2216 sizeof(last_obj_offset));
2217 }
2218 return (fixup_offset >= last_min_offset);
2219}
2220
2221/**
2222 * struct binder_task_work_cb - for deferred close
2223 *
2224 * @twork: callback_head for task work
 * @file: file to close
2226 *
2227 * Structure to pass task work to be handled after
2228 * returning from binder_ioctl() via task_work_add().
2229 */
2230struct binder_task_work_cb {
2231 struct callback_head twork;
2232 struct file *file;
2233};
2234
2235/**
 * binder_do_fd_close() - close the file scheduled for deferred close
2237 * @twork: callback head for task work
2238 *
2239 * It is not safe to call ksys_close() during the binder_ioctl()
2240 * function if there is a chance that binder's own file descriptor
2241 * might be closed. This is to meet the requirements for using
2242 * fdget() (see comments for __fget_light()). Therefore use
2243 * task_work_add() to schedule the close operation once we have
2244 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the final fput() on the file
 * whose descriptor was already detached from the fd table.
2247 */
2248static void binder_do_fd_close(struct callback_head *twork)
2249{
2250 struct binder_task_work_cb *twcb = container_of(twork,
2251 struct binder_task_work_cb, twork);
2252
2253 fput(twcb->file);
2254 kfree(twcb);
2255}
2256
2257/**
2258 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2259 * @fd: file-descriptor to close
2260 *
2261 * See comments in binder_do_fd_close(). This function is used to schedule
2262 * a file-descriptor to be closed after returning from binder_ioctl().
2263 */
2264static void binder_deferred_fd_close(int fd)
2265{
2266 struct binder_task_work_cb *twcb;
2267
2268 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2269 if (!twcb)
2270 return;
2271 init_task_work(&twcb->twork, binder_do_fd_close);
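	/*
	 * __close_fd_get_file() detaches the fd from the table right
	 * away; the final fput() is deferred to task work so that it
	 * runs after binder_ioctl() has returned.
	 */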
2272 __close_fd_get_file(fd, &twcb->file);
2273 if (twcb->file)
2274 task_work_add(current, &twcb->twork, true);
2275 else
2276 kfree(twcb);
2277}
2278
2279static void binder_transaction_buffer_release(struct binder_proc *proc,
2280 struct binder_buffer *buffer,
2281 binder_size_t failed_at,
2282 bool is_failure)
2283{
2284 int debug_id = buffer->debug_id;
2285 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2286
2287 binder_debug(BINDER_DEBUG_TRANSACTION,
2288 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2289 proc->pid, buffer->debug_id,
2290 buffer->data_size, buffer->offsets_size,
2291 (unsigned long long)failed_at);
2292
2293 if (buffer->target_node)
2294 binder_dec_node(buffer->target_node, 1, 0);
2295
2296 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2297 off_end_offset = is_failure ? failed_at :
2298 off_start_offset + buffer->offsets_size;
2299 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2300 buffer_offset += sizeof(binder_size_t)) {
2301 struct binder_object_header *hdr;
2302 size_t object_size;
2303 struct binder_object object;
2304 binder_size_t object_offset;
2305
2306 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2307 buffer, buffer_offset,
2308 sizeof(object_offset));
2309 object_size = binder_get_object(proc, buffer,
2310 object_offset, &object);
2311 if (object_size == 0) {
2312 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2313 debug_id, (u64)object_offset, buffer->data_size);
2314 continue;
2315 }
2316 hdr = &object.hdr;
2317 switch (hdr->type) {
2318 case BINDER_TYPE_BINDER:
2319 case BINDER_TYPE_WEAK_BINDER: {
2320 struct flat_binder_object *fp;
2321 struct binder_node *node;
2322
2323 fp = to_flat_binder_object(hdr);
2324 node = binder_get_node(proc, fp->binder);
2325 if (node == NULL) {
2326 pr_err("transaction release %d bad node %016llx\n",
2327 debug_id, (u64)fp->binder);
2328 break;
2329 }
2330 binder_debug(BINDER_DEBUG_TRANSACTION,
2331 " node %d u%016llx\n",
2332 node->debug_id, (u64)node->ptr);
2333 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2334 0);
2335 binder_put_node(node);
2336 } break;
2337 case BINDER_TYPE_HANDLE:
2338 case BINDER_TYPE_WEAK_HANDLE: {
2339 struct flat_binder_object *fp;
2340 struct binder_ref_data rdata;
2341 int ret;
2342
2343 fp = to_flat_binder_object(hdr);
2344 ret = binder_dec_ref_for_handle(proc, fp->handle,
2345 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2346
2347 if (ret) {
2348 pr_err("transaction release %d bad handle %d, ret = %d\n",
2349 debug_id, fp->handle, ret);
2350 break;
2351 }
2352 binder_debug(BINDER_DEBUG_TRANSACTION,
2353 " ref %d desc %d\n",
2354 rdata.debug_id, rdata.desc);
2355 } break;
2356
2357 case BINDER_TYPE_FD: {
2358 /*
2359 * No need to close the file here since user-space
			 * closes it for successfully delivered
2361 * transactions. For transactions that weren't
2362 * delivered, the new fd was never allocated so
2363 * there is no need to close and the fput on the
2364 * file is done when the transaction is torn
2365 * down.
2366 */
2367 WARN_ON(failed_at &&
2368 proc->tsk == current->group_leader);
2369 } break;
2370 case BINDER_TYPE_PTR:
2371 /*
2372 * Nothing to do here, this will get cleaned up when the
2373 * transaction buffer gets freed
2374 */
2375 break;
2376 case BINDER_TYPE_FDA: {
2377 struct binder_fd_array_object *fda;
2378 struct binder_buffer_object *parent;
2379 struct binder_object ptr_object;
2380 binder_size_t fda_offset;
2381 size_t fd_index;
2382 binder_size_t fd_buf_size;
2383 binder_size_t num_valid;
2384
2385 if (proc->tsk != current->group_leader) {
2386 /*
2387 * Nothing to do if running in sender context
2388 * The fd fixups have not been applied so no
2389 * fds need to be closed.
2390 */
2391 continue;
2392 }
2393
2394 num_valid = (buffer_offset - off_start_offset) /
2395 sizeof(binder_size_t);
2396 fda = to_binder_fd_array_object(hdr);
2397 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2398 fda->parent,
2399 off_start_offset,
2400 NULL,
2401 num_valid);
2402 if (!parent) {
2403 pr_err("transaction release %d bad parent offset\n",
2404 debug_id);
2405 continue;
2406 }
2407 fd_buf_size = sizeof(u32) * fda->num_fds;
2408 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2409 pr_err("transaction release %d invalid number of fds (%lld)\n",
2410 debug_id, (u64)fda->num_fds);
2411 continue;
2412 }
2413 if (fd_buf_size > parent->length ||
2414 fda->parent_offset > parent->length - fd_buf_size) {
2415 /* No space for all file descriptors here. */
2416 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2417 debug_id, (u64)fda->num_fds);
2418 continue;
2419 }
2420 /*
2421 * the source data for binder_buffer_object is visible
2422 * to user-space and the @buffer element is the user
2423 * pointer to the buffer_object containing the fd_array.
2424 * Convert the address to an offset relative to
2425 * the base of the transaction buffer.
2426 */
2427 fda_offset =
2428 (parent->buffer - (uintptr_t)buffer->user_data) +
2429 fda->parent_offset;
2430 for (fd_index = 0; fd_index < fda->num_fds;
2431 fd_index++) {
2432 u32 fd;
2433 binder_size_t offset = fda_offset +
2434 fd_index * sizeof(fd);
2435
2436 binder_alloc_copy_from_buffer(&proc->alloc,
2437 &fd,
2438 buffer,
2439 offset,
2440 sizeof(fd));
2441 binder_deferred_fd_close(fd);
2442 }
2443 } break;
2444 default:
2445 pr_err("transaction release %d bad object type %x\n",
2446 debug_id, hdr->type);
2447 break;
2448 }
2449 }
2450}
2451
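/*
 * Translate a local binder object (BINDER_TYPE_BINDER/WEAK_BINDER) in the
 * sender's address space into a handle (BINDER_TYPE_HANDLE/WEAK_HANDLE)
 * valid in the target process, creating the node and/or ref as needed.
 */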
2452static int binder_translate_binder(struct flat_binder_object *fp,
2453 struct binder_transaction *t,
2454 struct binder_thread *thread)
2455{
2456 struct binder_node *node;
2457 struct binder_proc *proc = thread->proc;
2458 struct binder_proc *target_proc = t->to_proc;
2459 struct binder_ref_data rdata;
2460 int ret = 0;
2461
2462 node = binder_get_node(proc, fp->binder);
2463 if (!node) {
2464 node = binder_new_node(proc, fp);
2465 if (!node)
2466 return -ENOMEM;
2467 }
2468 if (fp->cookie != node->cookie) {
2469 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2470 proc->pid, thread->pid, (u64)fp->binder,
2471 node->debug_id, (u64)fp->cookie,
2472 (u64)node->cookie);
2473 ret = -EINVAL;
2474 goto done;
2475 }
2476 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2477 ret = -EPERM;
2478 goto done;
2479 }
2480
2481 ret = binder_inc_ref_for_node(target_proc, node,
2482 fp->hdr.type == BINDER_TYPE_BINDER,
2483 &thread->todo, &rdata);
2484 if (ret)
2485 goto done;
2486
2487 if (fp->hdr.type == BINDER_TYPE_BINDER)
2488 fp->hdr.type = BINDER_TYPE_HANDLE;
2489 else
2490 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2491 fp->binder = 0;
2492 fp->handle = rdata.desc;
2493 fp->cookie = 0;
2494
2495 trace_binder_transaction_node_to_ref(t, node, &rdata);
2496 binder_debug(BINDER_DEBUG_TRANSACTION,
2497 " node %d u%016llx -> ref %d desc %d\n",
2498 node->debug_id, (u64)node->ptr,
2499 rdata.debug_id, rdata.desc);
2500done:
2501 binder_put_node(node);
2502 return ret;
2503}
2504
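/*
 * Translate a handle from the sender into either the target's own local
 * binder object (if the handle refers back to a node owned by the target)
 * or a new/existing ref in the target process.
 */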
2505static int binder_translate_handle(struct flat_binder_object *fp,
2506 struct binder_transaction *t,
2507 struct binder_thread *thread)
2508{
2509 struct binder_proc *proc = thread->proc;
2510 struct binder_proc *target_proc = t->to_proc;
2511 struct binder_node *node;
2512 struct binder_ref_data src_rdata;
2513 int ret = 0;
2514
2515 node = binder_get_node_from_ref(proc, fp->handle,
2516 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2517 if (!node) {
2518 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2519 proc->pid, thread->pid, fp->handle);
2520 return -EINVAL;
2521 }
2522 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2523 ret = -EPERM;
2524 goto done;
2525 }
2526
2527 binder_node_lock(node);
2528 if (node->proc == target_proc) {
2529 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2530 fp->hdr.type = BINDER_TYPE_BINDER;
2531 else
2532 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2533 fp->binder = node->ptr;
2534 fp->cookie = node->cookie;
2535 if (node->proc)
2536 binder_inner_proc_lock(node->proc);
2537 else
2538 __acquire(&node->proc->inner_lock);
2539 binder_inc_node_nilocked(node,
2540 fp->hdr.type == BINDER_TYPE_BINDER,
2541 0, NULL);
2542 if (node->proc)
2543 binder_inner_proc_unlock(node->proc);
2544 else
2545 __release(&node->proc->inner_lock);
2546 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2547 binder_debug(BINDER_DEBUG_TRANSACTION,
2548 " ref %d desc %d -> node %d u%016llx\n",
2549 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2550 (u64)node->ptr);
2551 binder_node_unlock(node);
2552 } else {
2553 struct binder_ref_data dest_rdata;
2554
2555 binder_node_unlock(node);
2556 ret = binder_inc_ref_for_node(target_proc, node,
2557 fp->hdr.type == BINDER_TYPE_HANDLE,
2558 NULL, &dest_rdata);
2559 if (ret)
2560 goto done;
2561
2562 fp->binder = 0;
2563 fp->handle = dest_rdata.desc;
2564 fp->cookie = 0;
2565 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2566 &dest_rdata);
2567 binder_debug(BINDER_DEBUG_TRANSACTION,
2568 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2569 src_rdata.debug_id, src_rdata.desc,
2570 dest_rdata.debug_id, dest_rdata.desc,
2571 node->debug_id);
2572 }
2573done:
2574 binder_put_node(node);
2575 return ret;
2576}
2577
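/*
 * Validate the fd being passed and record a fixup so that the actual fd
 * allocation in the target happens later, from a target thread.
 */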
2578static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2579 struct binder_transaction *t,
2580 struct binder_thread *thread,
2581 struct binder_transaction *in_reply_to)
2582{
2583 struct binder_proc *proc = thread->proc;
2584 struct binder_proc *target_proc = t->to_proc;
2585 struct binder_txn_fd_fixup *fixup;
2586 struct file *file;
2587 int ret = 0;
2588 bool target_allows_fd;
2589
2590 if (in_reply_to)
2591 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2592 else
2593 target_allows_fd = t->buffer->target_node->accept_fds;
2594 if (!target_allows_fd) {
2595 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2596 proc->pid, thread->pid,
2597 in_reply_to ? "reply" : "transaction",
2598 fd);
2599 ret = -EPERM;
2600 goto err_fd_not_accepted;
2601 }
2602
2603 file = fget(fd);
2604 if (!file) {
2605 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2606 proc->pid, thread->pid, fd);
2607 ret = -EBADF;
2608 goto err_fget;
2609 }
2610 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2611 if (ret < 0) {
2612 ret = -EPERM;
2613 goto err_security;
2614 }
2615
2616 /*
2617 * Add fixup record for this transaction. The allocation
2618 * of the fd in the target needs to be done from a
2619 * target thread.
2620 */
2621 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2622 if (!fixup) {
2623 ret = -ENOMEM;
2624 goto err_alloc;
2625 }
2626 fixup->file = file;
2627 fixup->offset = fd_offset;
2628 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2629 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2630
2631 return ret;
2632
2633err_alloc:
2634err_security:
2635 fput(file);
2636err_fget:
2637err_fd_not_accepted:
2638 return ret;
2639}
2640
2641static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2642 struct binder_buffer_object *parent,
2643 struct binder_transaction *t,
2644 struct binder_thread *thread,
2645 struct binder_transaction *in_reply_to)
2646{
2647 binder_size_t fdi, fd_buf_size;
2648 binder_size_t fda_offset;
2649 struct binder_proc *proc = thread->proc;
2650 struct binder_proc *target_proc = t->to_proc;
2651
2652 fd_buf_size = sizeof(u32) * fda->num_fds;
2653 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2654 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2655 proc->pid, thread->pid, (u64)fda->num_fds);
2656 return -EINVAL;
2657 }
2658 if (fd_buf_size > parent->length ||
2659 fda->parent_offset > parent->length - fd_buf_size) {
2660 /* No space for all file descriptors here. */
2661 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2662 proc->pid, thread->pid, (u64)fda->num_fds);
2663 return -EINVAL;
2664 }
2665 /*
2666 * the source data for binder_buffer_object is visible
2667 * to user-space and the @buffer element is the user
2668 * pointer to the buffer_object containing the fd_array.
2669 * Convert the address to an offset relative to
2670 * the base of the transaction buffer.
2671 */
2672 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2673 fda->parent_offset;
2674 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2675 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2676 proc->pid, thread->pid);
2677 return -EINVAL;
2678 }
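	/*
	 * Layout sketch of the fd array inside its parent buffer
	 * (user-space view):
	 *
	 *	parent->buffer + fda->parent_offset + 0*sizeof(u32): fd[0]
	 *	parent->buffer + fda->parent_offset + 1*sizeof(u32): fd[1]
	 *	...
	 *
	 * Each fd below is read from the copy already made in the
	 * target's transaction buffer and translated one at a time.
	 */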
2679 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2680 u32 fd;
2681 int ret;
2682 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2683
2684 binder_alloc_copy_from_buffer(&target_proc->alloc,
2685 &fd, t->buffer,
2686 offset, sizeof(fd));
2687 ret = binder_translate_fd(fd, offset, t, thread,
2688 in_reply_to);
2689 if (ret < 0)
2690 return ret;
2691 }
2692 return 0;
2693}
2694
2695static int binder_fixup_parent(struct binder_transaction *t,
2696 struct binder_thread *thread,
2697 struct binder_buffer_object *bp,
2698 binder_size_t off_start_offset,
2699 binder_size_t num_valid,
2700 binder_size_t last_fixup_obj_off,
2701 binder_size_t last_fixup_min_off)
2702{
2703 struct binder_buffer_object *parent;
2704 struct binder_buffer *b = t->buffer;
2705 struct binder_proc *proc = thread->proc;
2706 struct binder_proc *target_proc = t->to_proc;
2707 struct binder_object object;
2708 binder_size_t buffer_offset;
2709 binder_size_t parent_offset;
2710
2711 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2712 return 0;
2713
2714 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2715 off_start_offset, &parent_offset,
2716 num_valid);
2717 if (!parent) {
2718 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2719 proc->pid, thread->pid);
2720 return -EINVAL;
2721 }
2722
2723 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2724 parent_offset, bp->parent_offset,
2725 last_fixup_obj_off,
2726 last_fixup_min_off)) {
2727 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2728 proc->pid, thread->pid);
2729 return -EINVAL;
2730 }
2731
2732 if (parent->length < sizeof(binder_uintptr_t) ||
2733 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2734 /* No space for a pointer here! */
2735 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2736 proc->pid, thread->pid);
2737 return -EINVAL;
2738 }
2739 buffer_offset = bp->parent_offset +
2740 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2741 binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2742 &bp->buffer, sizeof(bp->buffer));
2743
2744 return 0;
2745}
2746
2747/**
2748 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2749 * @t: transaction to send
2750 * @proc: process to send the transaction to
2751 * @thread: thread in @proc to send the transaction to (may be NULL)
2752 *
2753 * This function queues a transaction to the specified process. It will try
2754 * to find a thread in the target process to handle the transaction and
2755 * wake it up. If no thread is found, the work is queued to the proc
2756 * waitqueue.
2757 *
2758 * If the @thread parameter is not NULL, the transaction is always queued
2759 * to the waitlist of that specific thread.
2760 *
 * Return:	true if the transaction was successfully queued
2762 * false if the target process or thread is dead
2763 */
2764static bool binder_proc_transaction(struct binder_transaction *t,
2765 struct binder_proc *proc,
2766 struct binder_thread *thread)
2767{
2768 struct binder_node *node = t->buffer->target_node;
2769 bool oneway = !!(t->flags & TF_ONE_WAY);
2770 bool pending_async = false;
2771
2772 BUG_ON(!node);
2773 binder_node_lock(node);
2774 if (oneway) {
2775 BUG_ON(thread);
2776 if (node->has_async_transaction) {
2777 pending_async = true;
2778 } else {
2779 node->has_async_transaction = true;
2780 }
2781 }
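	/*
	 * Oneway transactions to a node are serialized: while one is in
	 * flight, later ones sit on the node's async_todo list and are
	 * only queued once the in-flight buffer is freed.
	 */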
2782
2783 binder_inner_proc_lock(proc);
2784
2785 if (proc->is_dead || (thread && thread->is_dead)) {
2786 binder_inner_proc_unlock(proc);
2787 binder_node_unlock(node);
2788 return false;
2789 }
2790
2791 if (!thread && !pending_async)
2792 thread = binder_select_thread_ilocked(proc);
2793
2794 if (thread)
2795 binder_enqueue_thread_work_ilocked(thread, &t->work);
2796 else if (!pending_async)
2797 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2798 else
2799 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2800
2801 if (!pending_async)
2802 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2803
2804 binder_inner_proc_unlock(proc);
2805 binder_node_unlock(node);
2806
2807 return true;
2808}
2809
2810/**
2811 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2812 * @node: struct binder_node for which to get refs
 * @procp: returns @node->proc if valid
 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2815 *
2816 * User-space normally keeps the node alive when creating a transaction
2817 * since it has a reference to the target. The local strong ref keeps it
2818 * alive if the sending process dies before the target process processes
2819 * the transaction. If the source process is malicious or has a reference
2820 * counting bug, relying on the local strong ref can fail.
2821 *
2822 * Since user-space can cause the local strong ref to go away, we also take
2823 * a tmpref on the node to ensure it survives while we are constructing
2824 * the transaction. We also need a tmpref on the proc while we are
2825 * constructing the transaction, so we take that here as well.
2826 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
2830 */
2831static struct binder_node *binder_get_node_refs_for_txn(
2832 struct binder_node *node,
2833 struct binder_proc **procp,
2834 uint32_t *error)
2835{
2836 struct binder_node *target_node = NULL;
2837
2838 binder_node_inner_lock(node);
2839 if (node->proc) {
2840 target_node = node;
2841 binder_inc_node_nilocked(node, 1, 0, NULL);
2842 binder_inc_node_tmpref_ilocked(node);
2843 node->proc->tmp_ref++;
2844 *procp = node->proc;
2845 } else
2846 *error = BR_DEAD_REPLY;
2847 binder_node_inner_unlock(node);
2848
2849 return target_node;
2850}
2851
2852static void binder_transaction(struct binder_proc *proc,
2853 struct binder_thread *thread,
2854 struct binder_transaction_data *tr, int reply,
2855 binder_size_t extra_buffers_size)
2856{
2857 int ret;
2858 struct binder_transaction *t;
2859 struct binder_work *w;
2860 struct binder_work *tcomplete;
2861 binder_size_t buffer_offset = 0;
2862 binder_size_t off_start_offset, off_end_offset;
2863 binder_size_t off_min;
2864 binder_size_t sg_buf_offset, sg_buf_end_offset;
2865 struct binder_proc *target_proc = NULL;
2866 struct binder_thread *target_thread = NULL;
2867 struct binder_node *target_node = NULL;
2868 struct binder_transaction *in_reply_to = NULL;
2869 struct binder_transaction_log_entry *e;
2870 uint32_t return_error = 0;
2871 uint32_t return_error_param = 0;
2872 uint32_t return_error_line = 0;
2873 binder_size_t last_fixup_obj_off = 0;
2874 binder_size_t last_fixup_min_off = 0;
2875 struct binder_context *context = proc->context;
2876 int t_debug_id = atomic_inc_return(&binder_last_id);
2877 char *secctx = NULL;
2878 u32 secctx_sz = 0;
2879
2880 e = binder_transaction_log_add(&binder_transaction_log);
2881 e->debug_id = t_debug_id;
2882 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2883 e->from_proc = proc->pid;
2884 e->from_thread = thread->pid;
2885 e->target_handle = tr->target.handle;
2886 e->data_size = tr->data_size;
2887 e->offsets_size = tr->offsets_size;
2888 e->context_name = proc->context->name;
2889
2890 if (reply) {
2891 binder_inner_proc_lock(proc);
2892 in_reply_to = thread->transaction_stack;
2893 if (in_reply_to == NULL) {
2894 binder_inner_proc_unlock(proc);
2895 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2896 proc->pid, thread->pid);
2897 return_error = BR_FAILED_REPLY;
2898 return_error_param = -EPROTO;
2899 return_error_line = __LINE__;
2900 goto err_empty_call_stack;
2901 }
2902 if (in_reply_to->to_thread != thread) {
2903 spin_lock(&in_reply_to->lock);
2904 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2905 proc->pid, thread->pid, in_reply_to->debug_id,
2906 in_reply_to->to_proc ?
2907 in_reply_to->to_proc->pid : 0,
2908 in_reply_to->to_thread ?
2909 in_reply_to->to_thread->pid : 0);
2910 spin_unlock(&in_reply_to->lock);
2911 binder_inner_proc_unlock(proc);
2912 return_error = BR_FAILED_REPLY;
2913 return_error_param = -EPROTO;
2914 return_error_line = __LINE__;
2915 in_reply_to = NULL;
2916 goto err_bad_call_stack;
2917 }
2918 thread->transaction_stack = in_reply_to->to_parent;
2919 binder_inner_proc_unlock(proc);
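		/* restore the nice value saved when this call was received */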
2920 binder_set_nice(in_reply_to->saved_priority);
2921 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2922 if (target_thread == NULL) {
2923 /* annotation for sparse */
2924 __release(&target_thread->proc->inner_lock);
2925 return_error = BR_DEAD_REPLY;
2926 return_error_line = __LINE__;
2927 goto err_dead_binder;
2928 }
2929 if (target_thread->transaction_stack != in_reply_to) {
2930 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2931 proc->pid, thread->pid,
2932 target_thread->transaction_stack ?
2933 target_thread->transaction_stack->debug_id : 0,
2934 in_reply_to->debug_id);
2935 binder_inner_proc_unlock(target_thread->proc);
2936 return_error = BR_FAILED_REPLY;
2937 return_error_param = -EPROTO;
2938 return_error_line = __LINE__;
2939 in_reply_to = NULL;
2940 target_thread = NULL;
2941 goto err_dead_binder;
2942 }
2943 target_proc = target_thread->proc;
2944 target_proc->tmp_ref++;
2945 binder_inner_proc_unlock(target_thread->proc);
2946 } else {
2947 if (tr->target.handle) {
2948 struct binder_ref *ref;
2949
2950 /*
2951 * There must already be a strong ref
2952 * on this node. If so, do a strong
2953 * increment on the node to ensure it
2954 * stays alive until the transaction is
2955 * done.
2956 */
2957 binder_proc_lock(proc);
2958 ref = binder_get_ref_olocked(proc, tr->target.handle,
2959 true);
2960 if (ref) {
2961 target_node = binder_get_node_refs_for_txn(
2962 ref->node, &target_proc,
2963 &return_error);
2964 } else {
2965 binder_user_error("%d:%d got transaction to invalid handle\n",
2966 proc->pid, thread->pid);
2967 return_error = BR_FAILED_REPLY;
2968 }
2969 binder_proc_unlock(proc);
2970 } else {
2971 mutex_lock(&context->context_mgr_node_lock);
2972 target_node = context->binder_context_mgr_node;
2973 if (target_node)
2974 target_node = binder_get_node_refs_for_txn(
2975 target_node, &target_proc,
2976 &return_error);
2977 else
2978 return_error = BR_DEAD_REPLY;
2979 mutex_unlock(&context->context_mgr_node_lock);
2980 if (target_node && target_proc == proc) {
2981 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2982 proc->pid, thread->pid);
2983 return_error = BR_FAILED_REPLY;
2984 return_error_param = -EINVAL;
2985 return_error_line = __LINE__;
2986 goto err_invalid_target_handle;
2987 }
2988 }
2989 if (!target_node) {
2990 /*
2991 * return_error is set above
2992 */
2993 return_error_param = -EINVAL;
2994 return_error_line = __LINE__;
2995 goto err_dead_binder;
2996 }
2997 e->to_node = target_node->debug_id;
2998 if (security_binder_transaction(proc->tsk,
2999 target_proc->tsk) < 0) {
3000 return_error = BR_FAILED_REPLY;
3001 return_error_param = -EPERM;
3002 return_error_line = __LINE__;
3003 goto err_invalid_target_handle;
3004 }
3005 binder_inner_proc_lock(proc);
3006
3007 w = list_first_entry_or_null(&thread->todo,
3008 struct binder_work, entry);
3009 if (!(tr->flags & TF_ONE_WAY) && w &&
3010 w->type == BINDER_WORK_TRANSACTION) {
3011 /*
3012 * Do not allow new outgoing transaction from a
3013 * thread that has a transaction at the head of
3014 * its todo list. Only need to check the head
3015 * because binder_select_thread_ilocked picks a
3016 * thread from proc->waiting_threads to enqueue
3017 * the transaction, and nothing is queued to the
3018 * todo list while the thread is on waiting_threads.
3019 */
3020 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3021 proc->pid, thread->pid);
3022 binder_inner_proc_unlock(proc);
3023 return_error = BR_FAILED_REPLY;
3024 return_error_param = -EPROTO;
3025 return_error_line = __LINE__;
3026 goto err_bad_todo_list;
3027 }
3028
3029 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3030 struct binder_transaction *tmp;
3031
3032 tmp = thread->transaction_stack;
3033 if (tmp->to_thread != thread) {
3034 spin_lock(&tmp->lock);
3035 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3036 proc->pid, thread->pid, tmp->debug_id,
3037 tmp->to_proc ? tmp->to_proc->pid : 0,
3038 tmp->to_thread ?
3039 tmp->to_thread->pid : 0);
3040 spin_unlock(&tmp->lock);
3041 binder_inner_proc_unlock(proc);
3042 return_error = BR_FAILED_REPLY;
3043 return_error_param = -EPROTO;
3044 return_error_line = __LINE__;
3045 goto err_bad_call_stack;
3046 }
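			/*
			 * Walk this thread's call chain looking for a thread
			 * in the target process that is already waiting on a
			 * reply from us; if found, queue the new transaction
			 * to it so nested calls reuse the same thread.
			 */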
3047 while (tmp) {
3048 struct binder_thread *from;
3049
3050 spin_lock(&tmp->lock);
3051 from = tmp->from;
3052 if (from && from->proc == target_proc) {
3053 atomic_inc(&from->tmp_ref);
3054 target_thread = from;
3055 spin_unlock(&tmp->lock);
3056 break;
3057 }
3058 spin_unlock(&tmp->lock);
3059 tmp = tmp->from_parent;
3060 }
3061 }
3062 binder_inner_proc_unlock(proc);
3063 }
3064 if (target_thread)
3065 e->to_thread = target_thread->pid;
3066 e->to_proc = target_proc->pid;
3067
3068 /* TODO: reuse incoming transaction for reply */
3069 t = kzalloc(sizeof(*t), GFP_KERNEL);
3070 if (t == NULL) {
3071 return_error = BR_FAILED_REPLY;
3072 return_error_param = -ENOMEM;
3073 return_error_line = __LINE__;
3074 goto err_alloc_t_failed;
3075 }
3076 INIT_LIST_HEAD(&t->fd_fixups);
3077 binder_stats_created(BINDER_STAT_TRANSACTION);
3078 spin_lock_init(&t->lock);
3079
3080 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3081 if (tcomplete == NULL) {
3082 return_error = BR_FAILED_REPLY;
3083 return_error_param = -ENOMEM;
3084 return_error_line = __LINE__;
3085 goto err_alloc_tcomplete_failed;
3086 }
3087 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3088
3089 t->debug_id = t_debug_id;
3090
3091 if (reply)
3092 binder_debug(BINDER_DEBUG_TRANSACTION,
3093 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3094 proc->pid, thread->pid, t->debug_id,
3095 target_proc->pid, target_thread->pid,
3096 (u64)tr->data.ptr.buffer,
3097 (u64)tr->data.ptr.offsets,
3098 (u64)tr->data_size, (u64)tr->offsets_size,
3099 (u64)extra_buffers_size);
3100 else
3101 binder_debug(BINDER_DEBUG_TRANSACTION,
3102 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3103 proc->pid, thread->pid, t->debug_id,
3104 target_proc->pid, target_node->debug_id,
3105 (u64)tr->data.ptr.buffer,
3106 (u64)tr->data.ptr.offsets,
3107 (u64)tr->data_size, (u64)tr->offsets_size,
3108 (u64)extra_buffers_size);
3109
3110 if (!reply && !(tr->flags & TF_ONE_WAY))
3111 t->from = thread;
3112 else
3113 t->from = NULL;
3114 t->sender_euid = task_euid(proc->tsk);
3115 t->to_proc = target_proc;
3116 t->to_thread = target_thread;
3117 t->code = tr->code;
3118 t->flags = tr->flags;
3119 t->priority = task_nice(current);
3120
3121 if (target_node && target_node->txn_security_ctx) {
3122 u32 secid;
3123
3124 security_task_getsecid(proc->tsk, &secid);
3125 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3126 if (ret) {
3127 return_error = BR_FAILED_REPLY;
3128 return_error_param = ret;
3129 return_error_line = __LINE__;
3130 goto err_get_secctx_failed;
3131 }
3132 extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
3133 }
3134
3135 trace_binder_transaction(reply, t, target_node);
3136
3137 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3138 tr->offsets_size, extra_buffers_size,
3139 !reply && (t->flags & TF_ONE_WAY));
3140 if (IS_ERR(t->buffer)) {
3141 /*
3142 * -ESRCH indicates VMA cleared. The target is dying.
3143 */
3144 return_error_param = PTR_ERR(t->buffer);
3145 return_error = return_error_param == -ESRCH ?
3146 BR_DEAD_REPLY : BR_FAILED_REPLY;
3147 return_error_line = __LINE__;
3148 t->buffer = NULL;
3149 goto err_binder_alloc_buf_failed;
3150 }
3151 if (secctx) {
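		/*
		 * The security context was accounted for at the tail of
		 * extra_buffers_size above, so it is copied to the very
		 * end of the extra buffers area.
		 */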
3152 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3153 ALIGN(tr->offsets_size, sizeof(void *)) +
3154 ALIGN(extra_buffers_size, sizeof(void *)) -
3155 ALIGN(secctx_sz, sizeof(u64));
3156
3157 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3158 binder_alloc_copy_to_buffer(&target_proc->alloc,
3159 t->buffer, buf_offset,
3160 secctx, secctx_sz);
3161 security_release_secctx(secctx, secctx_sz);
3162 secctx = NULL;
3163 }
3164 t->buffer->debug_id = t->debug_id;
3165 t->buffer->transaction = t;
3166 t->buffer->target_node = target_node;
3167 trace_binder_transaction_alloc_buf(t->buffer);
3168
3169 if (binder_alloc_copy_user_to_buffer(
3170 &target_proc->alloc,
3171 t->buffer, 0,
3172 (const void __user *)
3173 (uintptr_t)tr->data.ptr.buffer,
3174 tr->data_size)) {
3175 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3176 proc->pid, thread->pid);
3177 return_error = BR_FAILED_REPLY;
3178 return_error_param = -EFAULT;
3179 return_error_line = __LINE__;
3180 goto err_copy_data_failed;
3181 }
3182 if (binder_alloc_copy_user_to_buffer(
3183 &target_proc->alloc,
3184 t->buffer,
3185 ALIGN(tr->data_size, sizeof(void *)),
3186 (const void __user *)
3187 (uintptr_t)tr->data.ptr.offsets,
3188 tr->offsets_size)) {
3189 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3190 proc->pid, thread->pid);
3191 return_error = BR_FAILED_REPLY;
3192 return_error_param = -EFAULT;
3193 return_error_line = __LINE__;
3194 goto err_copy_data_failed;
3195 }
3196 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3197 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3198 proc->pid, thread->pid, (u64)tr->offsets_size);
3199 return_error = BR_FAILED_REPLY;
3200 return_error_param = -EINVAL;
3201 return_error_line = __LINE__;
3202 goto err_bad_offset;
3203 }
3204 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3205 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3206 proc->pid, thread->pid,
3207 (u64)extra_buffers_size);
3208 return_error = BR_FAILED_REPLY;
3209 return_error_param = -EINVAL;
3210 return_error_line = __LINE__;
3211 goto err_bad_offset;
3212 }
3213 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3214 buffer_offset = off_start_offset;
3215 off_end_offset = off_start_offset + tr->offsets_size;
3216 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3217 sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
3218 off_min = 0;
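	/*
	 * Translate each object in the offsets array in place: nodes
	 * become handles (and vice versa), fds and fd arrays get fixup
	 * records, and scatter-gather buffers are copied with their
	 * parent pointers patched.
	 */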
3219 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3220 buffer_offset += sizeof(binder_size_t)) {
3221 struct binder_object_header *hdr;
3222 size_t object_size;
3223 struct binder_object object;
3224 binder_size_t object_offset;
3225
3226 binder_alloc_copy_from_buffer(&target_proc->alloc,
3227 &object_offset,
3228 t->buffer,
3229 buffer_offset,
3230 sizeof(object_offset));
3231 object_size = binder_get_object(target_proc, t->buffer,
3232 object_offset, &object);
3233 if (object_size == 0 || object_offset < off_min) {
3234 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3235 proc->pid, thread->pid,
3236 (u64)object_offset,
3237 (u64)off_min,
3238 (u64)t->buffer->data_size);
3239 return_error = BR_FAILED_REPLY;
3240 return_error_param = -EINVAL;
3241 return_error_line = __LINE__;
3242 goto err_bad_offset;
3243 }
3244
3245 hdr = &object.hdr;
3246 off_min = object_offset + object_size;
3247 switch (hdr->type) {
3248 case BINDER_TYPE_BINDER:
3249 case BINDER_TYPE_WEAK_BINDER: {
3250 struct flat_binder_object *fp;
3251
3252 fp = to_flat_binder_object(hdr);
3253 ret = binder_translate_binder(fp, t, thread);
3254 if (ret < 0) {
3255 return_error = BR_FAILED_REPLY;
3256 return_error_param = ret;
3257 return_error_line = __LINE__;
3258 goto err_translate_failed;
3259 }
3260 binder_alloc_copy_to_buffer(&target_proc->alloc,
3261 t->buffer, object_offset,
3262 fp, sizeof(*fp));
3263 } break;
3264 case BINDER_TYPE_HANDLE:
3265 case BINDER_TYPE_WEAK_HANDLE: {
3266 struct flat_binder_object *fp;
3267
3268 fp = to_flat_binder_object(hdr);
3269 ret = binder_translate_handle(fp, t, thread);
3270 if (ret < 0) {
3271 return_error = BR_FAILED_REPLY;
3272 return_error_param = ret;
3273 return_error_line = __LINE__;
3274 goto err_translate_failed;
3275 }
3276 binder_alloc_copy_to_buffer(&target_proc->alloc,
3277 t->buffer, object_offset,
3278 fp, sizeof(*fp));
3279 } break;
3280
3281 case BINDER_TYPE_FD: {
3282 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3283 binder_size_t fd_offset = object_offset +
3284 (uintptr_t)&fp->fd - (uintptr_t)fp;
3285 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3286 thread, in_reply_to);
3287
3288 if (ret < 0) {
3289 return_error = BR_FAILED_REPLY;
3290 return_error_param = ret;
3291 return_error_line = __LINE__;
3292 goto err_translate_failed;
3293 }
3294 fp->pad_binder = 0;
3295 binder_alloc_copy_to_buffer(&target_proc->alloc,
3296 t->buffer, object_offset,
3297 fp, sizeof(*fp));
3298 } break;
3299 case BINDER_TYPE_FDA: {
3300 struct binder_object ptr_object;
3301 binder_size_t parent_offset;
3302 struct binder_fd_array_object *fda =
3303 to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
3306 struct binder_buffer_object *parent =
3307 binder_validate_ptr(target_proc, t->buffer,
3308 &ptr_object, fda->parent,
3309 off_start_offset,
3310 &parent_offset,
3311 num_valid);
3312 if (!parent) {
3313 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3314 proc->pid, thread->pid);
3315 return_error = BR_FAILED_REPLY;
3316 return_error_param = -EINVAL;
3317 return_error_line = __LINE__;
3318 goto err_bad_parent;
3319 }
3320 if (!binder_validate_fixup(target_proc, t->buffer,
3321 off_start_offset,
3322 parent_offset,
3323 fda->parent_offset,
3324 last_fixup_obj_off,
3325 last_fixup_min_off)) {
3326 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3327 proc->pid, thread->pid);
3328 return_error = BR_FAILED_REPLY;
3329 return_error_param = -EINVAL;
3330 return_error_line = __LINE__;
3331 goto err_bad_parent;
3332 }
3333 ret = binder_translate_fd_array(fda, parent, t, thread,
3334 in_reply_to);
3335 if (ret < 0) {
3336 return_error = BR_FAILED_REPLY;
3337 return_error_param = ret;
3338 return_error_line = __LINE__;
3339 goto err_translate_failed;
3340 }
3341 last_fixup_obj_off = parent_offset;
3342 last_fixup_min_off =
3343 fda->parent_offset + sizeof(u32) * fda->num_fds;
3344 } break;
3345 case BINDER_TYPE_PTR: {
3346 struct binder_buffer_object *bp =
3347 to_binder_buffer_object(hdr);
3348 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3349 size_t num_valid;
3350
3351 if (bp->length > buf_left) {
3352 binder_user_error("%d:%d got transaction with too large buffer\n",
3353 proc->pid, thread->pid);
3354 return_error = BR_FAILED_REPLY;
3355 return_error_param = -EINVAL;
3356 return_error_line = __LINE__;
3357 goto err_bad_offset;
3358 }
3359 if (binder_alloc_copy_user_to_buffer(
3360 &target_proc->alloc,
3361 t->buffer,
3362 sg_buf_offset,
3363 (const void __user *)
3364 (uintptr_t)bp->buffer,
3365 bp->length)) {
3366 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3367 proc->pid, thread->pid);
3368 return_error_param = -EFAULT;
3369 return_error = BR_FAILED_REPLY;
3370 return_error_line = __LINE__;
3371 goto err_copy_data_failed;
3372 }
3373 /* Fixup buffer pointer to target proc address space */
3374 bp->buffer = (uintptr_t)
3375 t->buffer->user_data + sg_buf_offset;
3376 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3377
			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
3380 ret = binder_fixup_parent(t, thread, bp,
3381 off_start_offset,
3382 num_valid,
3383 last_fixup_obj_off,
3384 last_fixup_min_off);
3385 if (ret < 0) {
3386 return_error = BR_FAILED_REPLY;
3387 return_error_param = ret;
3388 return_error_line = __LINE__;
3389 goto err_translate_failed;
3390 }
3391 binder_alloc_copy_to_buffer(&target_proc->alloc,
3392 t->buffer, object_offset,
3393 bp, sizeof(*bp));
3394 last_fixup_obj_off = object_offset;
3395 last_fixup_min_off = 0;
3396 } break;
3397 default:
3398 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3399 proc->pid, thread->pid, hdr->type);
3400 return_error = BR_FAILED_REPLY;
3401 return_error_param = -EINVAL;
3402 return_error_line = __LINE__;
3403 goto err_bad_object_type;
3404 }
3405 }
3406 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3407 t->work.type = BINDER_WORK_TRANSACTION;
3408
3409 if (reply) {
3410 binder_enqueue_thread_work(thread, tcomplete);
3411 binder_inner_proc_lock(target_proc);
3412 if (target_thread->is_dead) {
3413 binder_inner_proc_unlock(target_proc);
3414 goto err_dead_proc_or_thread;
3415 }
3416 BUG_ON(t->buffer->async_transaction != 0);
3417 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3418 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3419 binder_inner_proc_unlock(target_proc);
3420 wake_up_interruptible_sync(&target_thread->wait);
3421 binder_free_transaction(in_reply_to);
3422 } else if (!(t->flags & TF_ONE_WAY)) {
3423 BUG_ON(t->buffer->async_transaction != 0);
3424 binder_inner_proc_lock(proc);
3425 /*
3426 * Defer the TRANSACTION_COMPLETE, so we don't return to
3427 * userspace immediately; this allows the target process to
3428 * immediately start processing this transaction, reducing
3429 * latency. We will then return the TRANSACTION_COMPLETE when
3430 * the target replies (or there is an error).
3431 */
3432 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3433 t->need_reply = 1;
3434 t->from_parent = thread->transaction_stack;
3435 thread->transaction_stack = t;
3436 binder_inner_proc_unlock(proc);
3437 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3438 binder_inner_proc_lock(proc);
3439 binder_pop_transaction_ilocked(thread, t);
3440 binder_inner_proc_unlock(proc);
3441 goto err_dead_proc_or_thread;
3442 }
3443 } else {
3444 BUG_ON(target_node == NULL);
3445 BUG_ON(t->buffer->async_transaction != 1);
3446 binder_enqueue_thread_work(thread, tcomplete);
3447 if (!binder_proc_transaction(t, target_proc, NULL))
3448 goto err_dead_proc_or_thread;
3449 }
3450 if (target_thread)
3451 binder_thread_dec_tmpref(target_thread);
3452 binder_proc_dec_tmpref(target_proc);
3453 if (target_node)
3454 binder_dec_node_tmpref(target_node);
3455 /*
3456 * write barrier to synchronize with initialization
3457 * of log entry
3458 */
3459 smp_wmb();
3460 WRITE_ONCE(e->debug_id_done, t_debug_id);
3461 return;
3462
3463err_dead_proc_or_thread:
3464 return_error = BR_DEAD_REPLY;
3465 return_error_line = __LINE__;
3466 binder_dequeue_work(proc, tcomplete);
3467err_translate_failed:
3468err_bad_object_type:
3469err_bad_offset:
3470err_bad_parent:
3471err_copy_data_failed:
3472 binder_free_txn_fixups(t);
3473