// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires node->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
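
/*
 * For illustration only: a path that needs both a node's lock and its
 * owning proc's inner lock takes them in the order documented above,
 * e.g.
 *
 *	binder_node_lock(node);
 *	binder_inner_proc_lock(node->proc);
 *	...
 *	binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 *
 * which is exactly what binder_node_inner_lock() and
 * binder_node_inner_unlock() below encapsulate.
 */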

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
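
/*
 * Illustrative note: because debug_mask is a 0644 module parameter, it
 * can be changed at runtime, e.g. adding BINDER_DEBUG_READ_WRITE (0x40)
 * and BINDER_DEBUG_TRANSACTION (0x200) to the default mask of 0x7:
 *
 *	echo 0x247 > /sys/module/binder/parameters/debug_mask
 */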

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
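
/*
 * Sketch of typical usage (illustrative only; thread->ee is the
 * per-thread extended-error state that userspace can read back with
 * the BINDER_GET_EXTENDED_ERROR ioctl, and t_debug_id is a
 * hypothetical transaction id variable):
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, -ENOMEM);
 */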

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
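
/*
 * Illustrative note: the log is a fixed-size ring. atomic_inc_return()
 * hands out monotonically increasing slot numbers, so with the
 * 32-entry array above, slot 33 wraps to entry[33 % 32] == entry[1],
 * and "full" latches once the counter has passed the array size.
 */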

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
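
/*
 * Sketch of typical usage (illustrative): a caller not already holding
 * the inner lock queues work for a thread through the locking wrapper,
 * e.g. for a transaction's work item:
 *
 *	binder_enqueue_thread_work(thread, &t->work);
 *
 * while code paths that already hold proc->inner_lock call the
 * _ilocked variants directly.
 */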

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 * returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
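
/*
 * Illustrative note: rlimit_to_nice() maps an RLIMIT_NICE value n to
 * the lowest permitted nice value, 20 - n. So a limit of 40 allows the
 * full range down to -20, a limit of 1 allows only 19, and a limit of
 * 0 yields 20, which exceeds MAX_NICE and triggers the RLIMIT_NICE
 * error above.
 */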

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
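
/*
 * Sketch of typical usage (illustrative): the returned node carries an
 * implicit tmp_ref that the caller must drop when done:
 *
 *	node = binder_get_node(proc, fp->binder);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */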

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
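
/*
 * Design note: binder_new_node() allocates optimistically outside the
 * inner lock (kzalloc() may sleep, and a spinlock may not be held
 * across it), then lets binder_init_node_ilocked() decide under the
 * lock whether a racing thread already inserted an equivalent node;
 * the loser's allocation is simply freed.
 */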

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 * allocated/initialized the ref first in which case the
 * returned ref would be different than the passed-in
 * new_ref. new_ref must be kfree'd by the caller in
 * this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
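	/*
	 * Pick the smallest unused descriptor at or above the starting
	 * value by walking the refs in ascending desc order (desc 0 is
	 * reserved for the context manager's node).
	 */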
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_ref held will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
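
/*
 * Illustrative note: like binder_new_node(), binder_inc_ref_for_node()
 * drops the proc lock to kzalloc() a new ref and then retries the
 * lookup, so a racing thread may win the insert; the redundant
 * allocation is kfree'd, and on error a ref that did get inserted is
 * unwound with binder_cleanup_ref_olocked().
 */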

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
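
/*
 * Sketch of typical usage (illustrative):
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		... use from ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */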

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
1653 | |
1654 | /** |
1655 | * binder_cleanup_transaction() - cleans up undelivered transaction |
1656 | * @t: transaction that needs to be cleaned up |
1657 | * @reason: reason the transaction wasn't delivered |
1658 | * @error_code: error to return to caller (if synchronous call) |
1659 | */ |
1660 | static void binder_cleanup_transaction(struct binder_transaction *t, |
1661 | const char *reason, |
1662 | uint32_t error_code) |
1663 | { |
1664 | if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { |
1665 | binder_send_failed_reply(t, error_code); |
1666 | } else { |
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
1669 | t->debug_id, reason); |
1670 | binder_free_transaction(t); |
1671 | } |
1672 | } |
1673 | |
1674 | /** |
1675 | * binder_get_object() - gets object and checks for valid metadata |
1676 | * @proc: binder_proc owning the buffer |
1677 | * @u: sender's user pointer to base of buffer |
1678 | * @buffer: binder_buffer that we're parsing. |
1679 | * @offset: offset in the @buffer at which to validate an object. |
1680 | * @object: struct binder_object to read into |
1681 | * |
1682 | * Copy the binder object at the given offset into @object. If @u is |
1683 | * provided then the copy is from the sender's buffer. If not, then |
1684 | * it is copied from the target's @buffer. |
1685 | * |
1686 | * Return: If there's a valid metadata object at @offset, the |
1687 | * size of that object. Otherwise, it returns zero. The object |
1688 | * is read into the struct binder_object pointed to by @object. |
1689 | */ |
1690 | static size_t binder_get_object(struct binder_proc *proc, |
1691 | const void __user *u, |
1692 | struct binder_buffer *buffer, |
1693 | unsigned long offset, |
1694 | struct binder_object *object) |
1695 | { |
1696 | size_t read_size; |
1697 | struct binder_object_header *hdr; |
1698 | size_t object_size = 0; |
1699 | |
1700 | read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); |
1701 | if (offset > buffer->data_size || read_size < sizeof(*hdr)) |
1702 | return 0; |
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}
1711 | |
1712 | /* Ok, now see if we read a complete object. */ |
1713 | hdr = &object->hdr; |
1714 | switch (hdr->type) { |
1715 | case BINDER_TYPE_BINDER: |
1716 | case BINDER_TYPE_WEAK_BINDER: |
1717 | case BINDER_TYPE_HANDLE: |
1718 | case BINDER_TYPE_WEAK_HANDLE: |
1719 | object_size = sizeof(struct flat_binder_object); |
1720 | break; |
1721 | case BINDER_TYPE_FD: |
1722 | object_size = sizeof(struct binder_fd_object); |
1723 | break; |
1724 | case BINDER_TYPE_PTR: |
1725 | object_size = sizeof(struct binder_buffer_object); |
1726 | break; |
1727 | case BINDER_TYPE_FDA: |
1728 | object_size = sizeof(struct binder_fd_array_object); |
1729 | break; |
1730 | default: |
1731 | return 0; |
1732 | } |
1733 | if (offset <= buffer->data_size - object_size && |
1734 | buffer->data_size >= object_size) |
1735 | return object_size; |
1736 | else |
1737 | return 0; |
1738 | } |
1739 | |
1740 | /** |
1741 | * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. |
1742 | * @proc: binder_proc owning the buffer |
1743 | * @b: binder_buffer containing the object |
1744 | * @object: struct binder_object to read into |
1745 | * @index: index in offset array at which the binder_buffer_object is |
1746 | * located |
1747 | * @start_offset: points to the start of the offset array |
1748 | * @object_offsetp: offset of @object read from @b |
1749 | * @num_valid: the number of valid offsets in the offset array |
1750 | * |
1751 | * Return: If @index is within the valid range of the offset array |
1752 | * described by @start and @num_valid, and if there's a valid |
1753 | * binder_buffer_object at the offset found in index @index |
1754 | * of the offset array, that object is returned. Otherwise, |
1755 | * %NULL is returned. |
1756 | * Note that the offset found in index @index itself is not |
1757 | * verified; this function assumes that @num_valid elements |
1758 | * from @start were previously verified to have valid offsets. |
1759 | * If @object_offsetp is non-NULL, then the offset within |
1760 | * @b is written to it. |
1761 | */ |
1762 | static struct binder_buffer_object *binder_validate_ptr( |
1763 | struct binder_proc *proc, |
1764 | struct binder_buffer *b, |
1765 | struct binder_object *object, |
1766 | binder_size_t index, |
1767 | binder_size_t start_offset, |
1768 | binder_size_t *object_offsetp, |
1769 | binder_size_t num_valid) |
1770 | { |
1771 | size_t object_size; |
1772 | binder_size_t object_offset; |
1773 | unsigned long buffer_offset; |
1774 | |
1775 | if (index >= num_valid) |
1776 | return NULL; |
1777 | |
1778 | buffer_offset = start_offset + sizeof(binder_size_t) * index; |
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1784 | if (!object_size || object->hdr.type != BINDER_TYPE_PTR) |
1785 | return NULL; |
1786 | if (object_offsetp) |
1787 | *object_offsetp = object_offset; |
1788 | |
1789 | return &object->bbo; |
1790 | } |
1791 | |
1792 | /** |
1793 | * binder_validate_fixup() - validates pointer/fd fixups happen in order. |
1794 | * @proc: binder_proc owning the buffer |
1795 | * @b: transaction buffer |
1796 | * @objects_start_offset: offset to start of objects buffer |
1797 | * @buffer_obj_offset: offset to binder_buffer_object in which to fix up |
1798 | * @fixup_offset: start offset in @buffer to fix up |
1799 | * @last_obj_offset: offset to last binder_buffer_object that we fixed |
1800 | * @last_min_offset: minimum fixup offset in object at @last_obj_offset |
1801 | * |
 * Return: %true if a fixup at @fixup_offset in buffer @b is allowed.
1804 | * |
1805 | * For safety reasons, we only allow fixups inside a buffer to happen |
1806 | * at increasing offsets; additionally, we only allow fixup on the last |
1807 | * buffer object that was verified, or one of its parents. |
1808 | * |
1809 | * Example of what is allowed: |
1810 | * |
1811 | * A |
1812 | * B (parent = A, offset = 0) |
1813 | * C (parent = A, offset = 16) |
1814 | * D (parent = C, offset = 0) |
1815 | * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) |
1816 | * |
1817 | * Examples of what is not allowed: |
1818 | * |
1819 | * Decreasing offsets within the same parent: |
1820 | * A |
1821 | * C (parent = A, offset = 16) |
1822 | * B (parent = A, offset = 0) // decreasing offset within A |
1823 | * |
1824 | * Referring to a parent that wasn't the last object or any of its parents: |
1825 | * A |
1826 | * B (parent = A, offset = 0) |
1827 | * C (parent = A, offset = 0) |
1828 | * C (parent = A, offset = 16) |
1829 | * D (parent = B, offset = 0) // B is not A or any of A's parents |
1830 | */ |
1831 | static bool binder_validate_fixup(struct binder_proc *proc, |
1832 | struct binder_buffer *b, |
1833 | binder_size_t objects_start_offset, |
1834 | binder_size_t buffer_obj_offset, |
1835 | binder_size_t fixup_offset, |
1836 | binder_size_t last_obj_offset, |
1837 | binder_size_t last_min_offset) |
1838 | { |
1839 | if (!last_obj_offset) { |
		/* Nothing to fix up */
1841 | return false; |
1842 | } |
1843 | |
1844 | while (last_obj_offset != buffer_obj_offset) { |
1845 | unsigned long buffer_offset; |
1846 | struct binder_object last_object; |
1847 | struct binder_buffer_object *last_bbo; |
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
1851 | if (object_size != sizeof(*last_bbo)) |
1852 | return false; |
1853 | |
1854 | last_bbo = &last_object.bbo; |
1855 | /* |
1856 | * Safe to retrieve the parent of last_obj, since it |
1857 | * was already previously verified by the driver. |
1858 | */ |
1859 | if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) |
1860 | return false; |
1861 | last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); |
1862 | buffer_offset = objects_start_offset + |
1863 | sizeof(binder_size_t) * last_bbo->parent; |
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
1868 | return false; |
1869 | } |
1870 | return (fixup_offset >= last_min_offset); |
1871 | } |
1872 | |
1873 | /** |
1874 | * struct binder_task_work_cb - for deferred close |
1875 | * |
1876 | * @twork: callback_head for task work |
1877 | * @fd: fd to close |
1878 | * |
1879 | * Structure to pass task work to be handled after |
1880 | * returning from binder_ioctl() via task_work_add(). |
1881 | */ |
1882 | struct binder_task_work_cb { |
1883 | struct callback_head twork; |
1884 | struct file *file; |
1885 | }; |
1886 | |
1887 | /** |
1888 | * binder_do_fd_close() - close list of file descriptors |
1889 | * @twork: callback head for task work |
1890 | * |
1891 | * It is not safe to call ksys_close() during the binder_ioctl() |
1892 | * function if there is a chance that binder's own file descriptor |
1893 | * might be closed. This is to meet the requirements for using |
1894 | * fdget() (see comments for __fget_light()). Therefore use |
1895 | * task_work_add() to schedule the close operation once we have |
1896 | * returned from binder_ioctl(). This function is a callback |
1897 | * for that mechanism and does the actual ksys_close() on the |
1898 | * given file descriptor. |
1899 | */ |
1900 | static void binder_do_fd_close(struct callback_head *twork) |
1901 | { |
1902 | struct binder_task_work_cb *twcb = container_of(twork, |
1903 | struct binder_task_work_cb, twork); |
1904 | |
1905 | fput(twcb->file); |
	kfree(twcb);
1907 | } |
1908 | |
1909 | /** |
1910 | * binder_deferred_fd_close() - schedule a close for the given file-descriptor |
1911 | * @fd: file-descriptor to close |
1912 | * |
1913 | * See comments in binder_do_fd_close(). This function is used to schedule |
1914 | * a file-descriptor to be closed after returning from binder_ioctl(). |
1915 | */ |
1916 | static void binder_deferred_fd_close(int fd) |
1917 | { |
1918 | struct binder_task_work_cb *twcb; |
1919 | |
	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = close_fd_get_file(fd);
	if (twcb->file) {
		// pin it until binder_do_fd_close(); see comments there
		get_file(twcb->file);
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
1932 | } |
1933 | } |
1934 | |
1935 | static void binder_transaction_buffer_release(struct binder_proc *proc, |
1936 | struct binder_thread *thread, |
1937 | struct binder_buffer *buffer, |
1938 | binder_size_t off_end_offset, |
1939 | bool is_failure) |
1940 | { |
1941 | int debug_id = buffer->debug_id; |
1942 | binder_size_t off_start_offset, buffer_offset; |
1943 | |
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)off_end_offset);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);
1952 | |
1953 | off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); |
1954 | |
1955 | for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; |
1956 | buffer_offset += sizeof(binder_size_t)) { |
1957 | struct binder_object_header *hdr; |
1958 | size_t object_size = 0; |
1959 | struct binder_object object; |
1960 | binder_size_t object_offset; |
1961 | |
		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
1970 | continue; |
1971 | } |
1972 | hdr = &object.hdr; |
1973 | switch (hdr->type) { |
1974 | case BINDER_TYPE_BINDER: |
1975 | case BINDER_TYPE_WEAK_BINDER: { |
1976 | struct flat_binder_object *fp; |
1977 | struct binder_node *node; |
1978 | |
1979 | fp = to_flat_binder_object(hdr); |
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
1992 | } break; |
1993 | case BINDER_TYPE_HANDLE: |
1994 | case BINDER_TYPE_WEAK_HANDLE: { |
1995 | struct flat_binder_object *fp; |
1996 | struct binder_ref_data rdata; |
1997 | int ret; |
1998 | |
1999 | fp = to_flat_binder_object(hdr); |
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
2011 | } break; |
2012 | |
2013 | case BINDER_TYPE_FD: { |
2014 | /* |
2015 | * No need to close the file here since user-space |
2016 | * closes it for successfully delivered |
2017 | * transactions. For transactions that weren't |
2018 | * delivered, the new fd was never allocated so |
2019 | * there is no need to close and the fput on the |
2020 | * file is done when the transaction is torn |
2021 | * down. |
2022 | */ |
2023 | } break; |
2024 | case BINDER_TYPE_PTR: |
2025 | /* |
2026 | * Nothing to do here, this will get cleaned up when the |
2027 | * transaction buffer gets freed |
2028 | */ |
2029 | break; |
2030 | case BINDER_TYPE_FDA: { |
2031 | struct binder_fd_array_object *fda; |
2032 | struct binder_buffer_object *parent; |
2033 | struct binder_object ptr_object; |
2034 | binder_size_t fda_offset; |
2035 | size_t fd_index; |
2036 | binder_size_t fd_buf_size; |
2037 | binder_size_t num_valid; |
2038 | |
2039 | if (is_failure) { |
2040 | /* |
2041 | * The fd fixups have not been applied so no |
2042 | * fds need to be closed. |
2043 | */ |
2044 | continue; |
2045 | } |
2046 | |
2047 | num_valid = (buffer_offset - off_start_offset) / |
2048 | sizeof(binder_size_t); |
2049 | fda = to_binder_fd_array_object(hdr); |
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
2071 | continue; |
2072 | } |
2073 | /* |
2074 | * the source data for binder_buffer_object is visible |
2075 | * to user-space and the @buffer element is the user |
2076 | * pointer to the buffer_object containing the fd_array. |
2077 | * Convert the address to an offset relative to |
2078 | * the base of the transaction buffer. |
2079 | */ |
2080 | fda_offset = |
2081 | (parent->buffer - (uintptr_t)buffer->user_data) + |
2082 | fda->parent_offset; |
2083 | for (fd_index = 0; fd_index < fda->num_fds; |
2084 | fd_index++) { |
2085 | u32 fd; |
2086 | int err; |
2087 | binder_size_t offset = fda_offset + |
2088 | fd_index * sizeof(fd); |
2089 | |
				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
2093 | WARN_ON(err); |
2094 | if (!err) { |
2095 | binder_deferred_fd_close(fd); |
2096 | /* |
2097 | * Need to make sure the thread goes |
2098 | * back to userspace to complete the |
2099 | * deferred close |
2100 | */ |
2101 | if (thread) |
2102 | thread->looper_need_return = true; |
2103 | } |
2104 | } |
2105 | } break; |
2106 | default: |
			pr_err("transaction release %d bad object type %x\n",
2108 | debug_id, hdr->type); |
2109 | break; |
2110 | } |
2111 | } |
2112 | } |
2113 | |
2114 | /* Clean up all the objects in the buffer */ |
2115 | static inline void binder_release_entire_buffer(struct binder_proc *proc, |
2116 | struct binder_thread *thread, |
2117 | struct binder_buffer *buffer, |
2118 | bool is_failure) |
2119 | { |
2120 | binder_size_t off_end_offset; |
2121 | |
2122 | off_end_offset = ALIGN(buffer->data_size, sizeof(void *)); |
2123 | off_end_offset += buffer->offsets_size; |
2124 | |
2125 | binder_transaction_buffer_release(proc, thread, buffer, |
2126 | off_end_offset, is_failure); |
2127 | } |
2128 | |
2129 | static int binder_translate_binder(struct flat_binder_object *fp, |
2130 | struct binder_transaction *t, |
2131 | struct binder_thread *thread) |
2132 | { |
2133 | struct binder_node *node; |
2134 | struct binder_proc *proc = thread->proc; |
2135 | struct binder_proc *target_proc = t->to_proc; |
2136 | struct binder_ref_data rdata; |
2137 | int ret = 0; |
2138 | |
	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
2177 | done: |
2178 | binder_put_node(node); |
2179 | return ret; |
2180 | } |
2181 | |
2182 | static int binder_translate_handle(struct flat_binder_object *fp, |
2183 | struct binder_transaction *t, |
2184 | struct binder_thread *thread) |
2185 | { |
2186 | struct binder_proc *proc = thread->proc; |
2187 | struct binder_proc *target_proc = t->to_proc; |
2188 | struct binder_node *node; |
2189 | struct binder_ref_data src_rdata; |
2190 | int ret = 0; |
2191 | |
	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
2249 | } |
2250 | done: |
2251 | binder_put_node(node); |
2252 | return ret; |
2253 | } |
2254 | |
2255 | static int binder_translate_fd(u32 fd, binder_size_t fd_offset, |
2256 | struct binder_transaction *t, |
2257 | struct binder_thread *thread, |
2258 | struct binder_transaction *in_reply_to) |
2259 | { |
2260 | struct binder_proc *proc = thread->proc; |
2261 | struct binder_proc *target_proc = t->to_proc; |
2262 | struct binder_txn_fd_fixup *fixup; |
2263 | struct file *file; |
2264 | int ret = 0; |
2265 | bool target_allows_fd; |
2266 | |
2267 | if (in_reply_to) |
2268 | target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); |
2269 | else |
2270 | target_allows_fd = t->buffer->target_node->accept_fds; |
2271 | if (!target_allows_fd) { |
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
2276 | ret = -EPERM; |
2277 | goto err_fd_not_accepted; |
2278 | } |
2279 | |
	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	fixup->target_fd = -1;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2308 | |
2309 | return ret; |
2310 | |
2311 | err_alloc: |
2312 | err_security: |
2313 | fput(file); |
2314 | err_fget: |
2315 | err_fd_not_accepted: |
2316 | return ret; |
2317 | } |
2318 | |
2319 | /** |
2320 | * struct binder_ptr_fixup - data to be fixed-up in target buffer |
 * @offset:	offset in target buffer to fixup
 * @skip_size:	bytes to skip in copy (fixup will be written later)
 * @fixup_data:	data to write at fixup offset
 * @node:	list node
2325 | * |
2326 | * This is used for the pointer fixup list (pf) which is created and consumed |
2327 | * during binder_transaction() and is only accessed locally. No |
2328 | * locking is necessary. |
2329 | * |
2330 | * The list is ordered by @offset. |
2331 | */ |
2332 | struct binder_ptr_fixup { |
2333 | binder_size_t offset; |
2334 | size_t skip_size; |
2335 | binder_uintptr_t fixup_data; |
2336 | struct list_head node; |
2337 | }; |
2338 | |
2339 | /** |
2340 | * struct binder_sg_copy - scatter-gather data to be copied |
 * @offset:	offset in target buffer
 * @sender_uaddr: user address in source buffer
 * @length:	bytes to copy
 * @node:	list node
2345 | * |
2346 | * This is used for the sg copy list (sgc) which is created and consumed |
2347 | * during binder_transaction() and is only accessed locally. No |
2348 | * locking is necessary. |
2349 | * |
2350 | * The list is ordered by @offset. |
2351 | */ |
2352 | struct binder_sg_copy { |
2353 | binder_size_t offset; |
2354 | const void __user *sender_uaddr; |
2355 | size_t length; |
2356 | struct list_head node; |
2357 | }; |
2358 | |
2359 | /** |
2360 | * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data |
2361 | * @alloc: binder_alloc associated with @buffer |
2362 | * @buffer: binder buffer in target process |
2363 | * @sgc_head: list_head of scatter-gather copy list |
2364 | * @pf_head: list_head of pointer fixup list |
2365 | * |
2366 | * Processes all elements of @sgc_head, applying fixups from @pf_head |
2367 | * and copying the scatter-gather data from the source process' user |
2368 | * buffer to the target's buffer. It is expected that the list creation |
2369 | * and processing all occurs during binder_transaction() so these lists |
2370 | * are only accessed in local context. |
2371 | * |
2372 | * Return: 0=success, else -errno |
2373 | */ |
2374 | static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, |
2375 | struct binder_buffer *buffer, |
2376 | struct list_head *sgc_head, |
2377 | struct list_head *pf_head) |
2378 | { |
2379 | int ret = 0; |
2380 | struct binder_sg_copy *sgc, *tmpsgc; |
2381 | struct binder_ptr_fixup *tmppf; |
2382 | struct binder_ptr_fixup *pf = |
2383 | list_first_entry_or_null(pf_head, struct binder_ptr_fixup, |
2384 | node); |
2385 | |
2386 | list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { |
2387 | size_t bytes_copied = 0; |
2388 | |
2389 | while (bytes_copied < sgc->length) { |
2390 | size_t copy_size; |
2391 | size_t bytes_left = sgc->length - bytes_copied; |
2392 | size_t offset = sgc->offset + bytes_copied; |
2393 | |
2394 | /* |
2395 | * We copy up to the fixup (pointed to by pf) |
2396 | */ |
2397 | copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) |
2398 | : bytes_left; |
2399 | if (!ret && copy_size) |
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
2405 | bytes_copied += copy_size; |
2406 | if (copy_size != bytes_left) { |
2407 | BUG_ON(!pf); |
2408 | /* we stopped at a fixup offset */ |
2409 | if (pf->skip_size) { |
2410 | /* |
2411 | * we are just skipping. This is for |
2412 | * BINDER_TYPE_FDA where the translated |
2413 | * fds will be fixed up when we get |
2414 | * to target context. |
2415 | */ |
2416 | bytes_copied += pf->skip_size; |
2417 | } else { |
2418 | /* apply the fixup indicated by pf */ |
2419 | if (!ret) |
					ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
				bytes_copied += sizeof(pf->fixup_data);
			}
			list_del(&pf->node);
			kfree(pf);
			pf = list_first_entry_or_null(pf_head,
					struct binder_ptr_fixup, node);
2431 | } |
2432 | } |
		list_del(&sgc->node);
		kfree(sgc);
2435 | } |
2436 | list_for_each_entry_safe(pf, tmppf, pf_head, node) { |
2437 | BUG_ON(pf->skip_size == 0); |
		list_del(&pf->node);
		kfree(pf);
2440 | } |
2441 | BUG_ON(!list_empty(sgc_head)); |
2442 | |
2443 | return ret > 0 ? -EINVAL : ret; |
2444 | } |
2445 | |
2446 | /** |
2447 | * binder_cleanup_deferred_txn_lists() - free specified lists |
2448 | * @sgc_head: list_head of scatter-gather copy list |
2449 | * @pf_head: list_head of pointer fixup list |
2450 | * |
2451 | * Called to clean up @sgc_head and @pf_head if there is an |
2452 | * error. |
2453 | */ |
2454 | static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, |
2455 | struct list_head *pf_head) |
2456 | { |
2457 | struct binder_sg_copy *sgc, *tmpsgc; |
2458 | struct binder_ptr_fixup *pf, *tmppf; |
2459 | |
	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		list_del(&pf->node);
		kfree(pf);
	}
2468 | } |
2469 | |
2470 | /** |
2471 | * binder_defer_copy() - queue a scatter-gather buffer for copy |
2472 | * @sgc_head: list_head of scatter-gather copy list |
2473 | * @offset: binder buffer offset in target process |
2474 | * @sender_uaddr: user address in source process |
2475 | * @length: bytes to copy |
2476 | * |
2477 | * Specify a scatter-gather block to be copied. The actual copy must |
2478 | * be deferred until all the needed fixups are identified and queued. |
2479 | * Then the copy and fixups are done together so un-translated values |
2480 | * from the source are never visible in the target buffer. |
2481 | * |
2482 | * We are guaranteed that repeated calls to this function will have |
2483 | * monotonically increasing @offset values so the list will naturally |
2484 | * be ordered. |
2485 | * |
2486 | * Return: 0=success, else -errno |
2487 | */ |
2488 | static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, |
2489 | const void __user *sender_uaddr, size_t length) |
2490 | { |
	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);

	if (!bc)
		return -ENOMEM;

	bc->offset = offset;
	bc->sender_uaddr = sender_uaddr;
	bc->length = length;
	INIT_LIST_HEAD(&bc->node);

	/*
	 * We are guaranteed that the deferred copies are in-order
	 * so just add to the tail.
	 */
	list_add_tail(&bc->node, sgc_head);
2506 | |
2507 | return 0; |
2508 | } |
2509 | |
2510 | /** |
2511 | * binder_add_fixup() - queue a fixup to be applied to sg copy |
2512 | * @pf_head: list_head of binder ptr fixup list |
2513 | * @offset: binder buffer offset in target process |
2514 | * @fixup: bytes to be copied for fixup |
2515 | * @skip_size: bytes to skip when copying (fixup will be applied later) |
2516 | * |
2517 | * Add the specified fixup to a list ordered by @offset. When copying |
2518 | * the scatter-gather buffers, the fixup will be copied instead of |
2519 | * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup |
2520 | * will be applied later (in target process context), so we just skip |
2521 | * the bytes specified by @skip_size. If @skip_size is 0, we copy the |
2522 | * value in @fixup. |
2523 | * |
2524 | * This function is called *mostly* in @offset order, but there are |
2525 | * exceptions. Since out-of-order inserts are relatively uncommon, |
2526 | * we insert the new element by searching backward from the tail of |
2527 | * the list. |
2528 | * |
2529 | * Return: 0=success, else -errno |
2530 | */ |
2531 | static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, |
2532 | binder_uintptr_t fixup, size_t skip_size) |
2533 | { |
	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	struct binder_ptr_fixup *tmppf;

	if (!pf)
		return -ENOMEM;

	pf->offset = offset;
	pf->fixup_data = fixup;
	pf->skip_size = skip_size;
	INIT_LIST_HEAD(&pf->node);

	/* Fixups are *mostly* added in-order, but there are some
	 * exceptions. Look backwards through list for insertion point.
	 */
	list_for_each_entry_reverse(tmppf, pf_head, node) {
		if (tmppf->offset < pf->offset) {
			list_add(&pf->node, &tmppf->node);
			return 0;
		}
	}
	/*
	 * if we get here, then the new offset is the lowest so
	 * insert at the head
	 */
	list_add(&pf->node, pf_head);
2559 | return 0; |
2560 | } |
2561 | |
2562 | static int binder_translate_fd_array(struct list_head *pf_head, |
2563 | struct binder_fd_array_object *fda, |
2564 | const void __user *sender_ubuffer, |
2565 | struct binder_buffer_object *parent, |
2566 | struct binder_buffer_object *sender_uparent, |
2567 | struct binder_transaction *t, |
2568 | struct binder_thread *thread, |
2569 | struct binder_transaction *in_reply_to) |
2570 | { |
2571 | binder_size_t fdi, fd_buf_size; |
2572 | binder_size_t fda_offset; |
2573 | const void __user *sender_ufda_base; |
2574 | struct binder_proc *proc = thread->proc; |
2575 | int ret; |
2576 | |
2577 | if (fda->num_fds == 0) |
2578 | return 0; |
2579 | |
2580 | fd_buf_size = sizeof(u32) * fda->num_fds; |
2581 | if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { |
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2583 | proc->pid, thread->pid, (u64)fda->num_fds); |
2584 | return -EINVAL; |
2585 | } |
2586 | if (fd_buf_size > parent->length || |
2587 | fda->parent_offset > parent->length - fd_buf_size) { |
2588 | /* No space for all file descriptors here. */ |
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2590 | proc->pid, thread->pid, (u64)fda->num_fds); |
2591 | return -EINVAL; |
2592 | } |
2593 | /* |
2594 | * the source data for binder_buffer_object is visible |
2595 | * to user-space and the @buffer element is the user |
2596 | * pointer to the buffer_object containing the fd_array. |
2597 | * Convert the address to an offset relative to |
2598 | * the base of the transaction buffer. |
2599 | */ |
2600 | fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + |
2601 | fda->parent_offset; |
2602 | sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + |
2603 | fda->parent_offset; |
2604 | |
	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
	if (ret)
		return ret;

	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);
		binder_size_t sender_uoffset = fdi * sizeof(fd);

		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret)
			return ret > 0 ? -EINVAL : ret;
	}
2626 | } |
2627 | return 0; |
2628 | } |
2629 | |
2630 | static int binder_fixup_parent(struct list_head *pf_head, |
2631 | struct binder_transaction *t, |
2632 | struct binder_thread *thread, |
2633 | struct binder_buffer_object *bp, |
2634 | binder_size_t off_start_offset, |
2635 | binder_size_t num_valid, |
2636 | binder_size_t last_fixup_obj_off, |
2637 | binder_size_t last_fixup_min_off) |
2638 | { |
2639 | struct binder_buffer_object *parent; |
2640 | struct binder_buffer *b = t->buffer; |
2641 | struct binder_proc *proc = thread->proc; |
2642 | struct binder_proc *target_proc = t->to_proc; |
2643 | struct binder_object object; |
2644 | binder_size_t buffer_offset; |
2645 | binder_size_t parent_offset; |
2646 | |
2647 | if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) |
2648 | return 0; |
2649 | |
	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2678 | } |
2679 | |
2680 | /** |
2681 | * binder_can_update_transaction() - Can a txn be superseded by an updated one? |
2682 | * @t1: the pending async txn in the frozen process |
2683 | * @t2: the new async txn to supersede the outdated pending one |
2684 | * |
2685 | * Return: true if t2 can supersede t1 |
2686 | * false if t2 can not supersede t1 |
2687 | */ |
2688 | static bool binder_can_update_transaction(struct binder_transaction *t1, |
2689 | struct binder_transaction *t2) |
2690 | { |
2691 | if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) != |
2692 | (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc) |
2693 | return false; |
2694 | if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code && |
2695 | t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid && |
2696 | t1->buffer->target_node->ptr == t2->buffer->target_node->ptr && |
2697 | t1->buffer->target_node->cookie == t2->buffer->target_node->cookie) |
2698 | return true; |
2699 | return false; |
2700 | } |
2701 | |
2702 | /** |
2703 | * binder_find_outdated_transaction_ilocked() - Find the outdated transaction |
2704 | * @t: new async transaction |
2705 | * @target_list: list to find outdated transaction |
2706 | * |
2707 | * Return: the outdated transaction if found |
 *         NULL if no outdated transaction can be found
2709 | * |
2710 | * Requires the proc->inner_lock to be held. |
2711 | */ |
2712 | static struct binder_transaction * |
2713 | binder_find_outdated_transaction_ilocked(struct binder_transaction *t, |
2714 | struct list_head *target_list) |
2715 | { |
2716 | struct binder_work *w; |
2717 | |
2718 | list_for_each_entry(w, target_list, entry) { |
2719 | struct binder_transaction *t_queued; |
2720 | |
2721 | if (w->type != BINDER_WORK_TRANSACTION) |
2722 | continue; |
2723 | t_queued = container_of(w, struct binder_transaction, work); |
		if (binder_can_update_transaction(t_queued, t))
2725 | return t_queued; |
2726 | } |
2727 | return NULL; |
2728 | } |
2729 | |
2730 | /** |
2731 | * binder_proc_transaction() - sends a transaction to a process and wakes it up |
2732 | * @t: transaction to send |
2733 | * @proc: process to send the transaction to |
2734 | * @thread: thread in @proc to send the transaction to (may be NULL) |
2735 | * |
2736 | * This function queues a transaction to the specified process. It will try |
2737 | * to find a thread in the target process to handle the transaction and |
2738 | * wake it up. If no thread is found, the work is queued to the proc |
2739 | * waitqueue. |
2740 | * |
2741 | * If the @thread parameter is not NULL, the transaction is always queued |
2742 | * to the waitlist of that specific thread. |
2743 | * |
2744 | * Return: 0 if the transaction was successfully queued |
2745 | * BR_DEAD_REPLY if the target process or thread is dead |
2746 | * BR_FROZEN_REPLY if the target process or thread is frozen and |
2747 | * the sync transaction was rejected |
2748 | * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen |
2749 | * and the async transaction was successfully queued |
2750 | */ |
2751 | static int binder_proc_transaction(struct binder_transaction *t, |
2752 | struct binder_proc *proc, |
2753 | struct binder_thread *thread) |
2754 | { |
2755 | struct binder_node *node = t->buffer->target_node; |
2756 | bool oneway = !!(t->flags & TF_ONE_WAY); |
2757 | bool pending_async = false; |
2758 | struct binder_transaction *t_outdated = NULL; |
2759 | bool frozen = false; |
2760 | |
2761 | BUG_ON(!node); |
2762 | binder_node_lock(node); |
2763 | if (oneway) { |
2764 | BUG_ON(thread); |
2765 | if (node->has_async_transaction) |
2766 | pending_async = true; |
2767 | else |
2768 | node->has_async_transaction = true; |
2769 | } |
2770 | |
2771 | binder_inner_proc_lock(proc); |
2772 | if (proc->is_frozen) { |
2773 | frozen = true; |
2774 | proc->sync_recv |= !oneway; |
2775 | proc->async_recv |= oneway; |
2776 | } |
2777 | |
2778 | if ((frozen && !oneway) || proc->is_dead || |
2779 | (thread && thread->is_dead)) { |
2780 | binder_inner_proc_unlock(proc); |
2781 | binder_node_unlock(node); |
2782 | return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY; |
2783 | } |
2784 | |
2785 | if (!thread && !pending_async) |
2786 | thread = binder_select_thread_ilocked(proc); |
2787 | |
2788 | if (thread) { |
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		if ((t->flags & TF_UPDATE_TXN) && frozen) {
			t_outdated = binder_find_outdated_transaction_ilocked(t,
					&node->async_todo);
			if (t_outdated) {
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "txn %d supersedes %d\n",
					     t->debug_id, t_outdated->debug_id);
				list_del_init(&t_outdated->work.entry);
				proc->outstanding_txns--;
			}
		}
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2809 | |
2810 | proc->outstanding_txns++; |
2811 | binder_inner_proc_unlock(proc); |
2812 | binder_node_unlock(node); |
2813 | |
2814 | /* |
2815 | * To reduce potential contention, free the outdated transaction and |
2816 | * buffer after releasing the locks. |
2817 | */ |
2818 | if (t_outdated) { |
2819 | struct binder_buffer *buffer = t_outdated->buffer; |
2820 | |
2821 | t_outdated->buffer = NULL; |
2822 | buffer->transaction = NULL; |
2823 | trace_binder_transaction_update_buffer_release(buffer); |
		binder_release_entire_buffer(proc, NULL, buffer, false);
		binder_alloc_free_buf(&proc->alloc, buffer);
		kfree(t_outdated);
		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2828 | } |
2829 | |
2830 | if (oneway && frozen) |
2831 | return BR_TRANSACTION_PENDING_FROZEN; |
2832 | |
2833 | return 0; |
2834 | } |
2835 | |
2836 | /** |
2837 | * binder_get_node_refs_for_txn() - Get required refs on node for txn |
2838 | * @node: struct binder_node for which to get refs |
2839 | * @procp: returns @node->proc if valid |
 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2841 | * |
2842 | * User-space normally keeps the node alive when creating a transaction |
2843 | * since it has a reference to the target. The local strong ref keeps it |
2844 | * alive if the sending process dies before the target process processes |
2845 | * the transaction. If the source process is malicious or has a reference |
2846 | * counting bug, relying on the local strong ref can fail. |
2847 | * |
2848 | * Since user-space can cause the local strong ref to go away, we also take |
2849 | * a tmpref on the node to ensure it survives while we are constructing |
2850 | * the transaction. We also need a tmpref on the proc while we are |
2851 | * constructing the transaction, so we take that here as well. |
2852 | * |
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
2856 | */ |
2857 | static struct binder_node *binder_get_node_refs_for_txn( |
2858 | struct binder_node *node, |
2859 | struct binder_proc **procp, |
2860 | uint32_t *error) |
2861 | { |
2862 | struct binder_node *target_node = NULL; |
2863 | |
2864 | binder_node_inner_lock(node); |
2865 | if (node->proc) { |
2866 | target_node = node; |
		binder_inc_node_nilocked(node, 1, 0, NULL);
2868 | binder_inc_node_tmpref_ilocked(node); |
2869 | node->proc->tmp_ref++; |
2870 | *procp = node->proc; |
2871 | } else |
2872 | *error = BR_DEAD_REPLY; |
2873 | binder_node_inner_unlock(node); |
2874 | |
2875 | return target_node; |
2876 | } |
2877 | |
2878 | static void binder_set_txn_from_error(struct binder_transaction *t, int id, |
2879 | uint32_t command, int32_t param) |
2880 | { |
2881 | struct binder_thread *from = binder_get_txn_from_and_acq_inner(t); |
2882 | |
2883 | if (!from) { |
2884 | /* annotation for sparse */ |
2885 | __release(&from->proc->inner_lock); |
2886 | return; |
2887 | } |
2888 | |
2889 | /* don't override existing errors */ |
2890 | if (from->ee.command == BR_OK) |
2891 | binder_set_extended_error(&from->ee, id, command, param); |
2892 | binder_inner_proc_unlock(from->proc); |
	binder_thread_dec_tmpref(from);
2894 | } |
2895 | |
2896 | static void binder_transaction(struct binder_proc *proc, |
2897 | struct binder_thread *thread, |
2898 | struct binder_transaction_data *tr, int reply, |
			       binder_size_t extra_buffers_size)
2900 | { |
2901 | int ret; |
2902 | struct binder_transaction *t; |
2903 | struct binder_work *w; |
2904 | struct binder_work *tcomplete; |
2905 | binder_size_t buffer_offset = 0; |
2906 | binder_size_t off_start_offset, off_end_offset; |
2907 | binder_size_t off_min; |
2908 | binder_size_t sg_buf_offset, sg_buf_end_offset; |
2909 | binder_size_t user_offset = 0; |
2910 | struct binder_proc *target_proc = NULL; |
2911 | struct binder_thread *target_thread = NULL; |
2912 | struct binder_node *target_node = NULL; |
2913 | struct binder_transaction *in_reply_to = NULL; |
2914 | struct binder_transaction_log_entry *e; |
2915 | uint32_t return_error = 0; |
2916 | uint32_t return_error_param = 0; |
2917 | uint32_t return_error_line = 0; |
2918 | binder_size_t last_fixup_obj_off = 0; |
2919 | binder_size_t last_fixup_min_off = 0; |
2920 | struct binder_context *context = proc->context; |
	int t_debug_id = atomic_inc_return(&binder_last_id);
2922 | ktime_t t_start_time = ktime_get(); |
2923 | char *secctx = NULL; |
2924 | u32 secctx_sz = 0; |
2925 | struct list_head sgc_head; |
2926 | struct list_head pf_head; |
2927 | const void __user *user_buffer = (const void __user *) |
2928 | (uintptr_t)tr->data.ptr.buffer; |
	INIT_LIST_HEAD(&sgc_head);
	INIT_LIST_HEAD(&pf_head);

	e = binder_transaction_log_add(&binder_transaction_log);
2933 | e->debug_id = t_debug_id; |
2934 | e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); |
2935 | e->from_proc = proc->pid; |
2936 | e->from_thread = thread->pid; |
2937 | e->target_handle = tr->target.handle; |
2938 | e->data_size = tr->data_size; |
2939 | e->offsets_size = tr->offsets_size; |
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2941 | |
2942 | binder_inner_proc_lock(proc); |
2943 | binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); |
2944 | binder_inner_proc_unlock(proc); |
2945 | |
2946 | if (reply) { |
2947 | binder_inner_proc_lock(proc); |
2948 | in_reply_to = thread->transaction_stack; |
2949 | if (in_reply_to == NULL) { |
2950 | binder_inner_proc_unlock(proc); |
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2952 | proc->pid, thread->pid); |
2953 | return_error = BR_FAILED_REPLY; |
2954 | return_error_param = -EPROTO; |
2955 | return_error_line = __LINE__; |
2956 | goto err_empty_call_stack; |
2957 | } |
2958 | if (in_reply_to->to_thread != thread) { |
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
2967 | binder_inner_proc_unlock(proc); |
2968 | return_error = BR_FAILED_REPLY; |
2969 | return_error_param = -EPROTO; |
2970 | return_error_line = __LINE__; |
2971 | in_reply_to = NULL; |
2972 | goto err_bad_call_stack; |
2973 | } |
2974 | thread->transaction_stack = in_reply_to->to_parent; |
2975 | binder_inner_proc_unlock(proc); |
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2978 | if (target_thread == NULL) { |
2979 | /* annotation for sparse */ |
2980 | __release(&target_thread->proc->inner_lock); |
			binder_txn_error("%d:%d reply target not found\n",
2982 | thread->pid, proc->pid); |
2983 | return_error = BR_DEAD_REPLY; |
2984 | return_error_line = __LINE__; |
2985 | goto err_dead_binder; |
2986 | } |
2987 | if (target_thread->transaction_stack != in_reply_to) { |
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2989 | proc->pid, thread->pid, |
2990 | target_thread->transaction_stack ? |
2991 | target_thread->transaction_stack->debug_id : 0, |
2992 | in_reply_to->debug_id); |
2993 | binder_inner_proc_unlock(target_thread->proc); |
2994 | return_error = BR_FAILED_REPLY; |
2995 | return_error_param = -EPROTO; |
2996 | return_error_line = __LINE__; |
2997 | in_reply_to = NULL; |
2998 | target_thread = NULL; |
2999 | goto err_dead_binder; |
3000 | } |
3001 | target_proc = target_thread->proc; |
3002 | target_proc->tmp_ref++; |
3003 | binder_inner_proc_unlock(target_thread->proc); |
3004 | } else { |
3005 | if (tr->target.handle) { |
3006 | struct binder_ref *ref; |
3007 | |
3008 | /* |
3009 | * There must already be a strong ref |
3010 | * on this node. If so, do a strong |
3011 | * increment on the node to ensure it |
3012 | * stays alive until the transaction is |
3013 | * done. |
3014 | */ |
3015 | binder_proc_lock(proc); |
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3024 | proc->pid, thread->pid, tr->target.handle); |
3025 | return_error = BR_FAILED_REPLY; |
3026 | } |
3027 | binder_proc_unlock(proc); |
3028 | } else { |
3029 | mutex_lock(&context->context_mgr_node_lock); |
3030 | target_node = context->binder_context_mgr_node; |
3031 | if (target_node) |
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3040 | proc->pid, thread->pid); |
3041 | return_error = BR_FAILED_REPLY; |
3042 | return_error_param = -EINVAL; |
3043 | return_error_line = __LINE__; |
3044 | goto err_invalid_target_handle; |
3045 | } |
3046 | } |
3047 | if (!target_node) { |
		binder_txn_error("%d:%d cannot find target node\n",
3049 | thread->pid, proc->pid); |
3050 | /* |
3051 | * return_error is set above |
3052 | */ |
3053 | return_error_param = -EINVAL; |
3054 | return_error_line = __LINE__; |
3055 | goto err_dead_binder; |
3056 | } |
3057 | e->to_node = target_node->debug_id; |
3058 | if (WARN_ON(proc == target_proc)) { |
		binder_txn_error("%d:%d self transactions not allowed\n",
3060 | thread->pid, proc->pid); |
3061 | return_error = BR_FAILED_REPLY; |
3062 | return_error_param = -EINVAL; |
3063 | return_error_line = __LINE__; |
3064 | goto err_invalid_target_handle; |
3065 | } |
3066 | if (security_binder_transaction(from: proc->cred, |
3067 | to: target_proc->cred) < 0) { |
		binder_txn_error("%d:%d transaction credentials failed\n",
3069 | thread->pid, proc->pid); |
3070 | return_error = BR_FAILED_REPLY; |
3071 | return_error_param = -EPERM; |
3072 | return_error_line = __LINE__; |
3073 | goto err_invalid_target_handle; |
3074 | } |
3075 | binder_inner_proc_lock(proc); |
3076 | |
3077 | w = list_first_entry_or_null(&thread->todo, |
3078 | struct binder_work, entry); |
3079 | if (!(tr->flags & TF_ONE_WAY) && w && |
3080 | w->type == BINDER_WORK_TRANSACTION) { |
3081 | /* |
3082 | * Do not allow new outgoing transaction from a |
3083 | * thread that has a transaction at the head of |
3084 | * its todo list. Only need to check the head |
3085 | * because binder_select_thread_ilocked picks a |
3086 | * thread from proc->waiting_threads to enqueue |
3087 | * the transaction, and nothing is queued to the |
3088 | * todo list while the thread is on waiting_threads. |
3089 | */ |
3090 | binder_user_error(format: "%d:%d new transaction not allowed when there is a transaction on thread todo\n" , |
3091 | proc->pid, thread->pid); |
3092 | binder_inner_proc_unlock(proc); |
3093 | return_error = BR_FAILED_REPLY; |
3094 | return_error_param = -EPROTO; |
3095 | return_error_line = __LINE__; |
3096 | goto err_bad_todo_list; |
3097 | } |
3098 | |
3099 | if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { |
3100 | struct binder_transaction *tmp; |
3101 | |
3102 | tmp = thread->transaction_stack; |
3103 | if (tmp->to_thread != thread) { |
3104 | spin_lock(lock: &tmp->lock); |
3105 | binder_user_error(format: "%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n" , |
3106 | proc->pid, thread->pid, tmp->debug_id, |
3107 | tmp->to_proc ? tmp->to_proc->pid : 0, |
3108 | tmp->to_thread ? |
3109 | tmp->to_thread->pid : 0); |
3110 | spin_unlock(lock: &tmp->lock); |
3111 | binder_inner_proc_unlock(proc); |
3112 | return_error = BR_FAILED_REPLY; |
3113 | return_error_param = -EPROTO; |
3114 | return_error_line = __LINE__; |
3115 | goto err_bad_call_stack; |
3116 | } |
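			/*
			 * Optimization: if some transaction in the caller's
			 * stack originated from the target process, deliver
			 * this transaction to that same thread, so the target
			 * need not wake an extra thread for a call that is
			 * logically part of the same call chain.
			 */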
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		binder_txn_error("%d:%d cannot allocate transaction\n",
			thread->pid, proc->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		binder_txn_error("%d:%d cannot allocate work for transaction\n",
			thread->pid, proc->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;
	t->start_time = t_start_time;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->from_pid = proc->pid;
	t->from_tid = thread->pid;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_cred_getsecid(proc->cred, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			binder_txn_error("%d:%d failed to get security context\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY), current->tgid);
	if (IS_ERR(t->buffer)) {
		char *s;

		ret = PTR_ERR(t->buffer);
		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
			: (ret == -ENOSPC) ? ": no space left"
			: (ret == -ENOMEM) ? ": memory allocation failed"
			: "";
		binder_txn_error("cannot allocate buffer%s", s);

		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
	trace_binder_transaction_alloc_buf(t->buffer);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
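	/*
	 * A sketch of the buffer layout computed below (regions aligned,
	 * offsets relative to the start of t->buffer):
	 *
	 *   | data | offsets array | sg buffers | secctx |
	 *   ^      ^off_start      ^sg_buf      ^end - ALIGN(secctx_sz)
	 */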
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
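	/*
	 * Walk the offsets array: each entry names an object embedded in
	 * the data buffer. Objects are validated and translated one at a
	 * time, and the plain data between them is copied from userspace
	 * as we go.
	 */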
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;
		binder_size_t copy_size;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			binder_txn_error("%d:%d copy offset from buffer failed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		/*
		 * Copy the source user buffer up to the next object
		 * that will be processed.
		 */
		copy_size = object_offset - user_offset;
		if (copy_size && (user_offset > object_offset ||
				binder_alloc_copy_user_to_buffer(
					&target_proc->alloc,
					t->buffer, user_offset,
					user_buffer + user_offset,
					copy_size))) {
			binder_user_error("%d:%d got transaction with invalid data ptr\n",
					proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EFAULT;
			return_error_line = __LINE__;
			goto err_copy_data_failed;
		}
		object_size = binder_get_object(target_proc, user_buffer,
				t->buffer, object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		/*
		 * Set offset to the next buffer fragment to be
		 * copied
		 */
		user_offset = object_offset + object_size;

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				binder_txn_error("%d:%d translate binder failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				binder_txn_error("%d:%d translate handle failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				binder_txn_error("%d:%d translate fd failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_object user_object;
			size_t user_parent_size;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			/*
			 * We need to read the user version of the parent
			 * object to get the original user offset
			 */
			user_parent_size =
				binder_get_object(proc, user_buffer, t->buffer,
						  parent_offset, &user_object);
			if (user_parent_size != sizeof(user_object.bbo)) {
				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
						  proc->pid, thread->pid,
						  user_parent_size,
						  sizeof(user_object.bbo));
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(&pf_head, fda,
							user_buffer, parent,
							&user_object.bbo, t,
							thread, in_reply_to);
			if (!ret)
				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
								  t->buffer,
								  object_offset,
								  fda, sizeof(*fda));
			if (ret) {
				binder_txn_error("%d:%d translate fd array failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret > 0 ? -EINVAL : ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
				(const void __user *)(uintptr_t)bp->buffer,
				bp->length);
			if (ret) {
				binder_txn_error("%d:%d deferred copy failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) /
					sizeof(binder_size_t);
			ret = binder_fixup_parent(&pf_head, t,
						  thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				binder_txn_error("%d:%d failed to fixup parent\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	/* Done processing objects, copy the rest of the buffer */
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, user_offset,
				user_buffer + user_offset,
				tr->data_size - user_offset)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}

	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
					    &sgc_head, &pf_head);
	if (ret) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = ret;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (t->buffer->oneway_spam_suspect)
		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
	else
		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			return_error = BR_DEAD_REPLY;
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		target_proc->outstanding_txns++;
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		return_error = binder_proc_transaction(t,
				target_proc, target_thread);
		if (return_error) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		return_error = binder_proc_transaction(t, target_proc, NULL);
		/*
		 * Let the caller know when async transaction reaches a frozen
		 * process and is put in a pending queue, waiting for the target
		 * process to be unfrozen.
		 */
		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
		binder_enqueue_thread_work(thread, tcomplete);
		if (return_error &&
		    return_error != BR_TRANSACTION_PENDING_FROZEN)
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

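/*
 * Error paths. Each label below matches a failure point above; control
 * falls through from the most recently set-up resource down to the
 * common logging and error-reply code at the end.
 */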
err_dead_proc_or_thread:
	binder_txn_error("%d:%d dead process or thread\n",
		thread->pid, proc->pid);
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, reply ? "reply" :
		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
		     target_proc ? target_proc->pid : 0,
		     target_thread ? target_thread->pid : 0,
		     t_debug_id, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_set_txn_from_error(in_reply_to, t_debug_id,
				return_error, return_error_param);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		binder_inner_proc_lock(proc);
		binder_set_extended_error(&thread->ee, t_debug_id,
				return_error, return_error_param);
		binder_inner_proc_unlock(proc);
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}

/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer, bool is_failure)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_release_entire_buffer(proc, thread, buffer, is_failure);
	binder_alloc_free_buf(&proc->alloc, buffer);
}

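/*
 * binder_thread_write() consumes a stream of BC_* commands that userspace
 * packed into the write buffer of a BINDER_WRITE_READ ioctl. A minimal
 * (hypothetical) userspace sketch of driving this path, assuming "fd" is
 * an open binder device descriptor:
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * On return, bwr.write_consumed reports how much of the command stream
 * was processed (mirrored here through *consumed).
 */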
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node) {
					if (ctx_mgr_node->proc == proc) {
						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
								  proc->pid, thread->pid);
						mutex_unlock(&context->context_mgr_node_lock);
						return -EINVAL;
					}
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				}
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, thread, buffer, false);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
							thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %u\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:	binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds first and only then install the files.
 *
 * If we fail to allocate an fd, skip the install and release
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			goto err;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fixup->target_fd = fd;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			goto err;
		}
	}
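	/*
	 * Every fd is now reserved and its number written into the
	 * buffer; only then are the files actually installed, so a
	 * failure above never leaves a half-visible fd in the target.
	 */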
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fd_install(fixup->target_fd, fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;

err:
	binder_free_txn_fixups(t);
	return ret;
}

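/*
 * binder_thread_read() fills the read buffer of a BINDER_WRITE_READ
 * ioctl with a stream of BR_* return commands. A minimal (hypothetical)
 * userspace sketch of the read side, mirroring the write sketch above:
 *
 *	uint32_t rbuf[128];
 *	struct binder_write_read bwr = {
 *		.read_size = sizeof(rbuf),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * On success, rbuf begins with BR_NOOP followed by zero or more
 * commands and their payloads; bwr.read_consumed gives the length.
 */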
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

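	/*
	 * When starting a fresh batch (*consumed == 0), emit a leading
	 * BR_NOOP so the returned stream is never empty.
	 */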
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE:
		case BINDER_WORK_TRANSACTION_PENDING:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
			if (proc->oneway_spam_detection_enabled &&
			    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
				cmd = BR_ONEWAY_SPAM_SUSPECT;
			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
				cmd = BR_TRANSACTION_PENDING_FROZEN;
			else
				cmd = BR_TRANSACTION_COMPLETE;
			binder_inner_proc_unlock(proc);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
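		/*
		 * BINDER_WORK_NODE: recompute whether userspace should be
		 * holding strong and/or weak references on this node, then
		 * emit the matching BR_INCREFS/BR_ACQUIRE/BR_RELEASE/
		 * BR_DECREFS commands to bring it up to date.
		 */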
4499 | case BINDER_WORK_NODE: { |
4500 | struct binder_node *node = container_of(w, struct binder_node, work); |
4501 | int strong, weak; |
4502 | binder_uintptr_t node_ptr = node->ptr; |
4503 | binder_uintptr_t node_cookie = node->cookie; |
4504 | int node_debug_id = node->debug_id; |
4505 | int has_weak_ref; |
4506 | int has_strong_ref; |
4507 | void __user *orig_ptr = ptr; |
4508 | |
4509 | BUG_ON(proc != node->proc); |
4510 | strong = node->internal_strong_refs || |
4511 | node->local_strong_refs; |
4512 | weak = !hlist_empty(h: &node->refs) || |
4513 | node->local_weak_refs || |
4514 | node->tmp_refs || strong; |
4515 | has_strong_ref = node->has_strong_ref; |
4516 | has_weak_ref = node->has_weak_ref; |
4517 | |
4518 | if (weak && !has_weak_ref) { |
4519 | node->has_weak_ref = 1; |
4520 | node->pending_weak_ref = 1; |
4521 | node->local_weak_refs++; |
4522 | } |
4523 | if (strong && !has_strong_ref) { |
4524 | node->has_strong_ref = 1; |
4525 | node->pending_strong_ref = 1; |
4526 | node->local_strong_refs++; |
4527 | } |
4528 | if (!strong && has_strong_ref) |
4529 | node->has_strong_ref = 0; |
4530 | if (!weak && has_weak_ref) |
4531 | node->has_weak_ref = 0; |
4532 | if (!weak && !strong) { |
4533 | binder_debug(mask: BINDER_DEBUG_INTERNAL_REFS, |
4534 | format: "%d:%d node %d u%016llx c%016llx deleted\n" , |
4535 | proc->pid, thread->pid, |
4536 | node_debug_id, |
4537 | (u64)node_ptr, |
4538 | (u64)node_cookie); |
4539 | rb_erase(&node->rb_node, &proc->nodes); |
4540 | binder_inner_proc_unlock(proc); |
4541 | binder_node_lock(node); |
4542 | /* |
4543 | * Acquire the node lock before freeing the |
4544 | * node to serialize with other threads that |
4545 | * may have been holding the node lock while |
4546 | * decrementing this node (avoids race where |
4547 | * this thread frees while the other thread |
4548 | * is unlocking the node after the final |
4549 | * decrement) |
4550 | */ |
4551 | binder_node_unlock(node); |
4552 | binder_free_node(node); |
4553 | } else |
4554 | binder_inner_proc_unlock(proc); |
4555 | |
4556 | if (weak && !has_weak_ref) |
4557 | ret = binder_put_node_cmd( |
4558 | proc, thread, ptrp: &ptr, node_ptr, |
4559 | node_cookie, node_debug_id, |
4560 | cmd: BR_INCREFS, cmd_name: "BR_INCREFS" ); |
4561 | if (!ret && strong && !has_strong_ref) |
4562 | ret = binder_put_node_cmd( |
4563 | proc, thread, ptrp: &ptr, node_ptr, |
4564 | node_cookie, node_debug_id, |
4565 | cmd: BR_ACQUIRE, cmd_name: "BR_ACQUIRE" ); |
4566 | if (!ret && !strong && has_strong_ref) |
4567 | ret = binder_put_node_cmd( |
4568 | proc, thread, ptrp: &ptr, node_ptr, |
4569 | node_cookie, node_debug_id, |
4570 | cmd: BR_RELEASE, cmd_name: "BR_RELEASE" ); |
4571 | if (!ret && !weak && has_weak_ref) |
4572 | ret = binder_put_node_cmd( |
4573 | proc, thread, ptrp: &ptr, node_ptr, |
4574 | node_cookie, node_debug_id, |
4575 | cmd: BR_DECREFS, cmd_name: "BR_DECREFS" ); |
4576 | if (orig_ptr == ptr) |
4577 | binder_debug(mask: BINDER_DEBUG_INTERNAL_REFS, |
4578 | format: "%d:%d node %d u%016llx c%016llx state unchanged\n" , |
4579 | proc->pid, thread->pid, |
4580 | node_debug_id, |
4581 | (u64)node_ptr, |
4582 | (u64)node_cookie); |
4583 | if (ret) |
4584 | return ret; |
4585 | } break; |
4586 | case BINDER_WORK_DEAD_BINDER: |
4587 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: |
4588 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { |
4589 | struct binder_ref_death *death; |
4590 | uint32_t cmd; |
4591 | binder_uintptr_t cookie; |
4592 | |
4593 | death = container_of(w, struct binder_ref_death, work); |
4594 | if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) |
4595 | cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; |
4596 | else |
4597 | cmd = BR_DEAD_BINDER; |
4598 | cookie = death->cookie; |
4599 | |
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4606 | (u64)cookie); |
4607 | if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { |
4608 | binder_inner_proc_unlock(proc); |
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
4614 | binder_inner_proc_unlock(proc); |
4615 | } |
4616 | if (put_user(cmd, (uint32_t __user *)ptr)) |
4617 | return -EFAULT; |
4618 | ptr += sizeof(uint32_t); |
4619 | if (put_user(cookie, |
4620 | (binder_uintptr_t __user *)ptr)) |
4621 | return -EFAULT; |
4622 | ptr += sizeof(binder_uintptr_t); |
4623 | binder_stat_br(proc, thread, cmd); |
4624 | if (cmd == BR_DEAD_BINDER) |
4625 | goto done; /* DEAD_BINDER notifications can cause transactions */ |
4626 | } break; |
4627 | default: |
4628 | binder_inner_proc_unlock(proc); |
4629 | pr_err("%d:%d: bad work type %d\n" , |
4630 | proc->pid, thread->pid, w->type); |
4631 | break; |
4632 | } |
4633 | |
4634 | if (!t) |
4635 | continue; |
4636 | |
4637 | BUG_ON(t->buffer == NULL); |
4638 | if (t->buffer->target_node) { |
4639 | struct binder_node *target_node = t->buffer->target_node; |
4640 | |
4641 | trd->target.ptr = target_node->ptr; |
4642 | trd->cookie = target_node->cookie; |
4643 | t->saved_priority = task_nice(current); |
4644 | if (t->priority < target_node->min_priority && |
4645 | !(t->flags & TF_ONE_WAY)) |
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
4650 | cmd = BR_TRANSACTION; |
4651 | } else { |
4652 | trd->target.ptr = 0; |
4653 | trd->cookie = 0; |
4654 | cmd = BR_REPLY; |
4655 | } |
4656 | trd->code = t->code; |
4657 | trd->flags = t->flags; |
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4659 | |
4660 | t_from = binder_get_txn_from(t); |
4661 | if (t_from) { |
4662 | struct task_struct *sender = t_from->proc->tsk; |
4663 | |
			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
4667 | } else { |
4668 | trd->sender_pid = 0; |
4669 | } |
4670 | |
4671 | ret = binder_apply_fd_fixups(proc, t); |
4672 | if (ret) { |
4673 | struct binder_buffer *buffer = t->buffer; |
4674 | bool oneway = !!(t->flags & TF_ONE_WAY); |
4675 | int tid = t->debug_id; |
4676 | |
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer, true);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
4689 | if (cmd == BR_REPLY) { |
4690 | cmd = BR_FAILED_REPLY; |
4691 | if (put_user(cmd, (uint32_t __user *)ptr)) |
4692 | return -EFAULT; |
4693 | ptr += sizeof(uint32_t); |
4694 | binder_stat_br(proc, thread, cmd); |
4695 | break; |
4696 | } |
4697 | continue; |
4698 | } |
4699 | trd->data_size = t->buffer->data_size; |
4700 | trd->offsets_size = t->buffer->offsets_size; |
4701 | trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; |
4702 | trd->data.ptr.offsets = trd->data.ptr.buffer + |
4703 | ALIGN(t->buffer->data_size, |
4704 | sizeof(void *)); |
4705 | |
4706 | tr.secctx = t->security_ctx; |
4707 | if (t->security_ctx) { |
4708 | cmd = BR_TRANSACTION_SEC_CTX; |
4709 | trsize = sizeof(tr); |
4710 | } |
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;
4731 | |
4732 | trace_binder_transaction_received(t); |
4733 | binder_stat_br(proc, thread, cmd); |
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     (cmd == BR_TRANSACTION_SEC_CTX) ?
				"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4740 | t->debug_id, t_from ? t_from->proc->pid : 0, |
4741 | t_from ? t_from->pid : 0, cmd, |
4742 | t->buffer->data_size, t->buffer->offsets_size, |
4743 | (u64)trd->data.ptr.buffer, |
4744 | (u64)trd->data.ptr.offsets); |
4745 | |
4746 | if (t_from) |
			binder_thread_dec_tmpref(t_from);
4748 | t->buffer->allow_user_free = 1; |
4749 | if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { |
4750 | binder_inner_proc_lock(thread->proc); |
4751 | t->to_parent = thread->transaction_stack; |
4752 | t->to_thread = thread; |
4753 | thread->transaction_stack = t; |
4754 | binder_inner_proc_unlock(thread->proc); |
4755 | } else { |
4756 | binder_free_transaction(t); |
4757 | } |
4758 | break; |
4759 | } |
4760 | |
4761 | done: |
4762 | |
4763 | *consumed = ptr - buffer; |
4764 | binder_inner_proc_lock(proc); |
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4779 | } else |
4780 | binder_inner_proc_unlock(proc); |
4781 | return 0; |
4782 | } |
4783 | |
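/**
 * binder_release_work() - drain a work list that can no longer be delivered
 * @proc: binder_proc whose inner lock protects @list
 * @list: list of binder_work items to release
 *
 * Dequeues every pending item and frees whatever it still references:
 * undelivered transactions are cleaned up with BR_DEAD_REPLY, while
 * transaction-complete records and death notifications are kfree'd.
 * Called when a thread or process is being torn down.
 */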
4784 | static void binder_release_work(struct binder_proc *proc, |
4785 | struct list_head *list) |
4786 | { |
4787 | struct binder_work *w; |
4788 | enum binder_work_type wtype; |
4789 | |
4790 | while (1) { |
4791 | binder_inner_proc_lock(proc); |
4792 | w = binder_dequeue_work_head_ilocked(list); |
4793 | wtype = w ? w->type : 0; |
4794 | binder_inner_proc_unlock(proc); |
4795 | if (!w) |
4796 | return; |
4797 | |
4798 | switch (wtype) { |
4799 | case BINDER_WORK_TRANSACTION: { |
4800 | struct binder_transaction *t; |
4801 | |
4802 | t = container_of(w, struct binder_transaction, work); |
4803 | |
			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
4806 | } break; |
4807 | case BINDER_WORK_RETURN_ERROR: { |
4808 | struct binder_error *e = container_of( |
4809 | w, struct binder_error, work); |
4810 | |
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_ERROR: %u\n",
				     e->cmd);
4814 | } break; |
4815 | case BINDER_WORK_TRANSACTION_PENDING: |
4816 | case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: |
4817 | case BINDER_WORK_TRANSACTION_COMPLETE: { |
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4822 | } break; |
4823 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: |
4824 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { |
4825 | struct binder_ref_death *death; |
4826 | |
4827 | death = container_of(w, struct binder_ref_death, work); |
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered death notification, %016llx\n",
				     (u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
4833 | } break; |
4834 | case BINDER_WORK_NODE: |
4835 | break; |
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}
}
4844 | |
4845 | static struct binder_thread *binder_get_thread_ilocked( |
4846 | struct binder_proc *proc, struct binder_thread *new_thread) |
4847 | { |
4848 | struct binder_thread *thread = NULL; |
4849 | struct rb_node *parent = NULL; |
4850 | struct rb_node **p = &proc->threads.rb_node; |
4851 | |
4852 | while (*p) { |
4853 | parent = *p; |
4854 | thread = rb_entry(parent, struct binder_thread, rb_node); |
4855 | |
4856 | if (current->pid < thread->pid) |
4857 | p = &(*p)->rb_left; |
4858 | else if (current->pid > thread->pid) |
4859 | p = &(*p)->rb_right; |
4860 | else |
4861 | return thread; |
4862 | } |
4863 | if (!new_thread) |
4864 | return NULL; |
4865 | thread = new_thread; |
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
4873 | rb_insert_color(&thread->rb_node, &proc->threads); |
4874 | thread->looper_need_return = true; |
4875 | thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; |
4876 | thread->return_error.cmd = BR_OK; |
4877 | thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; |
4878 | thread->reply_error.cmd = BR_OK; |
4879 | thread->ee.command = BR_OK; |
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4881 | return thread; |
4882 | } |
4883 | |
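/**
 * binder_get_thread() - find or create the binder_thread for current
 * @proc: binder_proc the calling task belongs to
 *
 * First tries a lookup by current->pid under the inner lock. On a miss
 * the lock is dropped so the new thread can be allocated with
 * GFP_KERNEL, then the lookup is retried with the fresh allocation; if
 * another task inserted the same pid in the meantime, the unused
 * allocation is freed.
 */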
4884 | static struct binder_thread *binder_get_thread(struct binder_proc *proc) |
4885 | { |
4886 | struct binder_thread *thread; |
4887 | struct binder_thread *new_thread; |
4888 | |
4889 | binder_inner_proc_lock(proc); |
4890 | thread = binder_get_thread_ilocked(proc, NULL); |
4891 | binder_inner_proc_unlock(proc); |
4892 | if (!thread) { |
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4894 | if (new_thread == NULL) |
4895 | return NULL; |
4896 | binder_inner_proc_lock(proc); |
4897 | thread = binder_get_thread_ilocked(proc, new_thread); |
4898 | binder_inner_proc_unlock(proc); |
4899 | if (thread != new_thread) |
			kfree(new_thread);
4901 | } |
4902 | return thread; |
4903 | } |
4904 | |
4905 | static void binder_free_proc(struct binder_proc *proc) |
4906 | { |
4907 | struct binder_device *device; |
4908 | |
4909 | BUG_ON(!list_empty(&proc->todo)); |
4910 | BUG_ON(!list_empty(&proc->delivered_death)); |
4911 | if (proc->outstanding_txns) |
4912 | pr_warn("%s: Unexpected outstanding_txns %d\n" , |
4913 | __func__, proc->outstanding_txns); |
4914 | device = container_of(proc->context, struct binder_device, context); |
4915 | if (refcount_dec_and_test(r: &device->ref)) { |
4916 | kfree(objp: proc->context->name); |
4917 | kfree(objp: device); |
4918 | } |
4919 | binder_alloc_deferred_release(alloc: &proc->alloc); |
4920 | put_task_struct(t: proc->tsk); |
4921 | put_cred(cred: proc->cred); |
4922 | binder_stats_deleted(type: BINDER_STAT_PROC); |
4923 | kfree(objp: proc); |
4924 | } |
4925 | |
4926 | static void binder_free_thread(struct binder_thread *thread) |
4927 | { |
4928 | BUG_ON(!list_empty(&thread->todo)); |
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
4932 | } |
4933 | |
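/**
 * binder_thread_release() - release thread state on thread exit
 * @proc:   owning binder_proc
 * @thread: thread being released
 *
 * Marks the thread dead, walks its transaction stack severing each
 * transaction from the exiting thread (replying with BR_DEAD_REPLY
 * where the peer still expects one), flushes poll() waiters, and
 * drains the thread's todo list. Returns the number of transactions
 * that were still active.
 */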
4934 | static int binder_thread_release(struct binder_proc *proc, |
4935 | struct binder_thread *thread) |
4936 | { |
4937 | struct binder_transaction *t; |
4938 | struct binder_transaction *send_reply = NULL; |
4939 | int active_transactions = 0; |
4940 | struct binder_transaction *last_t = NULL; |
4941 | |
4942 | binder_inner_proc_lock(thread->proc); |
4943 | /* |
4944 | * take a ref on the proc so it survives |
4945 | * after we remove this thread from proc->threads. |
4946 | * The corresponding dec is when we actually |
4947 | * free the thread in binder_free_thread() |
4948 | */ |
4949 | proc->tmp_ref++; |
4950 | /* |
4951 | * take a ref on this thread to ensure it |
4952 | * survives while we are releasing it |
4953 | */ |
	atomic_inc(&thread->tmp_ref);
4955 | rb_erase(&thread->rb_node, &proc->threads); |
4956 | t = thread->transaction_stack; |
4957 | if (t) { |
		spin_lock(&t->lock);
4959 | if (t->to_thread == thread) |
4960 | send_reply = t; |
4961 | } else { |
4962 | __acquire(&t->lock); |
4963 | } |
4964 | thread->is_dead = true; |
4965 | |
4966 | while (t) { |
4967 | last_t = t; |
4968 | active_transactions++; |
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");
4974 | |
4975 | if (t->to_thread == thread) { |
4976 | thread->proc->outstanding_txns--; |
4977 | t->to_proc = NULL; |
4978 | t->to_thread = NULL; |
4979 | if (t->buffer) { |
4980 | t->buffer->transaction = NULL; |
4981 | t->buffer = NULL; |
4982 | } |
4983 | t = t->to_parent; |
4984 | } else if (t->from == thread) { |
4985 | t->from = NULL; |
4986 | t = t->from_parent; |
4987 | } else |
4988 | BUG(); |
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
4992 | else |
4993 | __acquire(&t->lock); |
4994 | } |
4995 | /* annotation for sparse, lock not acquired in last iteration above */ |
4996 | __release(&t->lock); |
4997 | |
4998 | /* |
4999 | * If this thread used poll, make sure we remove the waitqueue from any |
5000 | * poll data structures holding it. |
5001 | */ |
5002 | if (thread->looper & BINDER_LOOPER_STATE_POLL) |
		wake_up_pollfree(&thread->wait);
5004 | |
5005 | binder_inner_proc_unlock(thread->proc); |
5006 | |
5007 | /* |
5008 | * This is needed to avoid races between wake_up_pollfree() above and |
5009 | * someone else removing the last entry from the queue for other reasons |
5010 | * (e.g. ep_remove_wait_queue() being called due to an epoll file |
5011 | * descriptor being closed). Such other users hold an RCU read lock, so |
5012 | * we can be sure they're done after we call synchronize_rcu(). |
5013 | */ |
5014 | if (thread->looper & BINDER_LOOPER_STATE_POLL) |
5015 | synchronize_rcu(); |
5016 | |
5017 | if (send_reply) |
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
5020 | binder_thread_dec_tmpref(thread); |
5021 | return active_transactions; |
5022 | } |
5023 | |
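/**
 * binder_poll() - poll/epoll support for the binder fd
 *
 * Registers the calling thread's waitqueue and reports EPOLLIN when
 * thread-local work (or process work, if this thread may handle it) is
 * pending. The BINDER_LOOPER_STATE_POLL flag is recorded so thread
 * teardown knows the waitqueue may be reachable from poll data
 * structures.
 */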
5024 | static __poll_t binder_poll(struct file *filp, |
5025 | struct poll_table_struct *wait) |
5026 | { |
5027 | struct binder_proc *proc = filp->private_data; |
5028 | struct binder_thread *thread = NULL; |
5029 | bool wait_for_proc_work; |
5030 | |
5031 | thread = binder_get_thread(proc); |
5032 | if (!thread) |
5033 | return POLLERR; |
5034 | |
5035 | binder_inner_proc_lock(thread->proc); |
5036 | thread->looper |= BINDER_LOOPER_STATE_POLL; |
5037 | wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); |
5038 | |
5039 | binder_inner_proc_unlock(thread->proc); |
5040 | |
	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
5044 | return EPOLLIN; |
5045 | |
5046 | return 0; |
5047 | } |
5048 | |
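/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 *
 * Copies the binder_write_read descriptor from userspace, consumes the
 * write buffer first (commands from the process), then fills the read
 * buffer (returns to the process), blocking unless the fd is
 * O_NONBLOCK. The consumed counts are copied back even on error so
 * userspace can tell how far processing got.
 */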
5049 | static int binder_ioctl_write_read(struct file *filp, unsigned long arg, |
5050 | struct binder_thread *thread) |
5051 | { |
5052 | int ret = 0; |
5053 | struct binder_proc *proc = filp->private_data; |
5054 | void __user *ubuf = (void __user *)arg; |
5055 | struct binder_write_read bwr; |
5056 | |
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5058 | ret = -EFAULT; |
5059 | goto out; |
5060 | } |
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5076 | ret = -EFAULT; |
5077 | goto out; |
5078 | } |
5079 | } |
5080 | if (bwr.read_size > 0) { |
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5102 | ret = -EFAULT; |
5103 | goto out; |
5104 | } |
5105 | out: |
5106 | return ret; |
5107 | } |
5108 | |
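/**
 * binder_ioctl_set_ctx_mgr() - register the context manager node
 *
 * Only one context manager (handle 0) may exist per binder context.
 * The request is checked against the LSM hook and, once a manager uid
 * has been established, against that uid. On success a new node with
 * strong and weak references pinned is published in the context.
 */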
5109 | static int binder_ioctl_set_ctx_mgr(struct file *filp, |
5110 | struct flat_binder_object *fbo) |
5111 | { |
5112 | int ret = 0; |
5113 | struct binder_proc *proc = filp->private_data; |
5114 | struct binder_context *context = proc->context; |
5115 | struct binder_node *new_node; |
5116 | kuid_t curr_euid = current_euid(); |
5117 | |
5118 | mutex_lock(&context->context_mgr_node_lock); |
5119 | if (context->binder_context_mgr_node) { |
5120 | pr_err("BINDER_SET_CONTEXT_MGR already set\n" ); |
5121 | ret = -EBUSY; |
5122 | goto out; |
5123 | } |
	ret = security_binder_set_context_mgr(proc->cred);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5130 | from_kuid(&init_user_ns, curr_euid), |
5131 | from_kuid(&init_user_ns, |
5132 | context->binder_context_mgr_uid)); |
5133 | ret = -EPERM; |
5134 | goto out; |
5135 | } |
5136 | } else { |
5137 | context->binder_context_mgr_uid = curr_euid; |
5138 | } |
	new_node = binder_new_node(proc, fbo);
5140 | if (!new_node) { |
5141 | ret = -ENOMEM; |
5142 | goto out; |
5143 | } |
5144 | binder_node_lock(new_node); |
5145 | new_node->local_weak_refs++; |
5146 | new_node->local_strong_refs++; |
5147 | new_node->has_strong_ref = 1; |
5148 | new_node->has_weak_ref = 1; |
5149 | context->binder_context_mgr_node = new_node; |
5150 | binder_node_unlock(new_node); |
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
5154 | return ret; |
5155 | } |
5156 | |
5157 | static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, |
5158 | struct binder_node_info_for_ref *info) |
5159 | { |
5160 | struct binder_node *node; |
5161 | struct binder_context *context = proc->context; |
5162 | __u32 handle = info->handle; |
5163 | |
5164 | if (info->strong_count || info->weak_count || info->reserved1 || |
5165 | info->reserved2 || info->reserved3) { |
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5167 | proc->pid); |
5168 | return -EINVAL; |
5169 | } |
5170 | |
5171 | /* This ioctl may only be used by the context manager */ |
5172 | mutex_lock(&context->context_mgr_node_lock); |
5173 | if (!context->binder_context_mgr_node || |
5174 | context->binder_context_mgr_node->proc != proc) { |
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
5181 | if (!node) |
5182 | return -EINVAL; |
5183 | |
5184 | info->strong_count = node->local_strong_refs + |
5185 | node->internal_strong_refs; |
5186 | info->weak_count = node->local_weak_refs; |
5187 | |
5188 | binder_put_node(node); |
5189 | |
5190 | return 0; |
5191 | } |
5192 | |
5193 | static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, |
5194 | struct binder_node_debug_info *info) |
5195 | { |
5196 | struct rb_node *n; |
5197 | binder_uintptr_t ptr = info->ptr; |
5198 | |
5199 | memset(info, 0, sizeof(*info)); |
5200 | |
5201 | binder_inner_proc_lock(proc); |
5202 | for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { |
5203 | struct binder_node *node = rb_entry(n, struct binder_node, |
5204 | rb_node); |
5205 | if (node->ptr > ptr) { |
5206 | info->ptr = node->ptr; |
5207 | info->cookie = node->cookie; |
5208 | info->has_strong_ref = node->has_strong_ref; |
5209 | info->has_weak_ref = node->has_weak_ref; |
5210 | break; |
5211 | } |
5212 | } |
5213 | binder_inner_proc_unlock(proc); |
5214 | |
5215 | return 0; |
5216 | } |
5217 | |
5218 | static bool binder_txns_pending_ilocked(struct binder_proc *proc) |
5219 | { |
5220 | struct rb_node *n; |
5221 | struct binder_thread *thread; |
5222 | |
5223 | if (proc->outstanding_txns > 0) |
5224 | return true; |
5225 | |
5226 | for (n = rb_first(&proc->threads); n; n = rb_next(n)) { |
5227 | thread = rb_entry(n, struct binder_thread, rb_node); |
5228 | if (thread->transaction_stack) |
5229 | return true; |
5230 | } |
5231 | return false; |
5232 | } |
5233 | |
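/**
 * binder_ioctl_freeze() - freeze or unfreeze one binder_proc
 *
 * Unfreezing just clears the frozen state and the sticky sync/async
 * receive flags. Freezing sets is_frozen to reject new transactions,
 * optionally waits up to the caller-supplied timeout for outstanding
 * transactions to drain, and backs the state out again with -EAGAIN if
 * transactions are still pending.
 */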
5234 | static int binder_ioctl_freeze(struct binder_freeze_info *info, |
5235 | struct binder_proc *target_proc) |
5236 | { |
5237 | int ret = 0; |
5238 | |
5239 | if (!info->enable) { |
5240 | binder_inner_proc_lock(target_proc); |
5241 | target_proc->sync_recv = false; |
5242 | target_proc->async_recv = false; |
5243 | target_proc->is_frozen = false; |
5244 | binder_inner_proc_unlock(target_proc); |
5245 | return 0; |
5246 | } |
5247 | |
5248 | /* |
5249 | * Freezing the target. Prevent new transactions by |
5250 | * setting frozen state. If timeout specified, wait |
5251 | * for transactions to drain. |
5252 | */ |
5253 | binder_inner_proc_lock(target_proc); |
5254 | target_proc->sync_recv = false; |
5255 | target_proc->async_recv = false; |
5256 | target_proc->is_frozen = true; |
5257 | binder_inner_proc_unlock(target_proc); |
5258 | |
5259 | if (info->timeout_ms > 0) |
5260 | ret = wait_event_interruptible_timeout( |
5261 | target_proc->freeze_wait, |
5262 | (!target_proc->outstanding_txns), |
5263 | msecs_to_jiffies(info->timeout_ms)); |
5264 | |
5265 | /* Check pending transactions that wait for reply */ |
5266 | if (ret >= 0) { |
5267 | binder_inner_proc_lock(target_proc); |
		if (binder_txns_pending_ilocked(target_proc))
5269 | ret = -EAGAIN; |
5270 | binder_inner_proc_unlock(target_proc); |
5271 | } |
5272 | |
5273 | if (ret < 0) { |
5274 | binder_inner_proc_lock(target_proc); |
5275 | target_proc->is_frozen = false; |
5276 | binder_inner_proc_unlock(target_proc); |
5277 | } |
5278 | |
5279 | return ret; |
5280 | } |
5281 | |
5282 | static int binder_ioctl_get_freezer_info( |
5283 | struct binder_frozen_status_info *info) |
5284 | { |
5285 | struct binder_proc *target_proc; |
5286 | bool found = false; |
5287 | __u32 txns_pending; |
5288 | |
5289 | info->sync_recv = 0; |
5290 | info->async_recv = 0; |
5291 | |
5292 | mutex_lock(&binder_procs_lock); |
5293 | hlist_for_each_entry(target_proc, &binder_procs, proc_node) { |
5294 | if (target_proc->pid == info->pid) { |
5295 | found = true; |
5296 | binder_inner_proc_lock(target_proc); |
			txns_pending = binder_txns_pending_ilocked(target_proc);
5298 | info->sync_recv |= target_proc->sync_recv | |
5299 | (txns_pending << 1); |
5300 | info->async_recv |= target_proc->async_recv; |
5301 | binder_inner_proc_unlock(target_proc); |
5302 | } |
5303 | } |
	mutex_unlock(&binder_procs_lock);
5305 | |
5306 | if (!found) |
5307 | return -EINVAL; |
5308 | |
5309 | return 0; |
5310 | } |
5311 | |
5312 | static int binder_ioctl_get_extended_error(struct binder_thread *thread, |
5313 | void __user *ubuf) |
5314 | { |
5315 | struct binder_extended_error ee; |
5316 | |
5317 | binder_inner_proc_lock(thread->proc); |
5318 | ee = thread->ee; |
5319 | binder_set_extended_error(&thread->ee, 0, BR_OK, 0); |
5320 | binder_inner_proc_unlock(thread->proc); |
5321 | |
	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5323 | return -EFAULT; |
5324 | |
5325 | return 0; |
5326 | } |
5327 | |
5328 | static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
5329 | { |
5330 | int ret; |
5331 | struct binder_proc *proc = filp->private_data; |
5332 | struct binder_thread *thread; |
5333 | void __user *ubuf = (void __user *)arg; |
5334 | |
5335 | /*pr_info("binder_ioctl: %d:%d %x %lx\n", |
5336 | proc->pid, current->pid, cmd, arg);*/ |
5337 | |
	binder_selftest_alloc(&proc->alloc);
5339 | |
5340 | trace_binder_ioctl(cmd, arg); |
5341 | |
5342 | ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); |
5343 | if (ret) |
5344 | goto err_unlocked; |
5345 | |
5346 | thread = binder_get_thread(proc); |
5347 | if (thread == NULL) { |
5348 | ret = -ENOMEM; |
5349 | goto err; |
5350 | } |
5351 | |
5352 | switch (cmd) { |
5353 | case BINDER_WRITE_READ: |
5354 | ret = binder_ioctl_write_read(filp, arg, thread); |
5355 | if (ret) |
5356 | goto err; |
5357 | break; |
5358 | case BINDER_SET_MAX_THREADS: { |
5359 | int max_threads; |
5360 | |
		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
5363 | ret = -EINVAL; |
5364 | goto err; |
5365 | } |
5366 | binder_inner_proc_lock(proc); |
5367 | proc->max_threads = max_threads; |
5368 | binder_inner_proc_unlock(proc); |
5369 | break; |
5370 | } |
5371 | case BINDER_SET_CONTEXT_MGR_EXT: { |
5372 | struct flat_binder_object fbo; |
5373 | |
		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5375 | ret = -EINVAL; |
5376 | goto err; |
5377 | } |
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5379 | if (ret) |
5380 | goto err; |
5381 | break; |
5382 | } |
5383 | case BINDER_SET_CONTEXT_MGR: |
5384 | ret = binder_ioctl_set_ctx_mgr(filp, NULL); |
5385 | if (ret) |
5386 | goto err; |
5387 | break; |
5388 | case BINDER_THREAD_EXIT: |
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5390 | proc->pid, thread->pid); |
5391 | binder_thread_release(proc, thread); |
5392 | thread = NULL; |
5393 | break; |
5394 | case BINDER_VERSION: { |
5395 | struct binder_version __user *ver = ubuf; |
5396 | |
5397 | if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, |
5398 | &ver->protocol_version)) { |
5399 | ret = -EINVAL; |
5400 | goto err; |
5401 | } |
5402 | break; |
5403 | } |
5404 | case BINDER_GET_NODE_INFO_FOR_REF: { |
5405 | struct binder_node_info_for_ref info; |
5406 | |
		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
5417 | ret = -EFAULT; |
5418 | goto err; |
5419 | } |
5420 | |
5421 | break; |
5422 | } |
5423 | case BINDER_GET_NODE_DEBUG_INFO: { |
5424 | struct binder_node_debug_info info; |
5425 | |
		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
5436 | ret = -EFAULT; |
5437 | goto err; |
5438 | } |
5439 | break; |
5440 | } |
5441 | case BINDER_FREEZE: { |
5442 | struct binder_freeze_info info; |
5443 | struct binder_proc **target_procs = NULL, *target_proc; |
5444 | int target_procs_count = 0, i = 0; |
5445 | |
5446 | ret = 0; |
5447 | |
		if (copy_from_user(&info, ubuf, sizeof(info))) {
5449 | ret = -EFAULT; |
5450 | goto err; |
5451 | } |
5452 | |
5453 | mutex_lock(&binder_procs_lock); |
5454 | hlist_for_each_entry(target_proc, &binder_procs, proc_node) { |
5455 | if (target_proc->pid == info.pid) |
5456 | target_procs_count++; |
5457 | } |
5458 | |
5459 | if (target_procs_count == 0) { |
			mutex_unlock(&binder_procs_lock);
5461 | ret = -EINVAL; |
5462 | goto err; |
5463 | } |
5464 | |
		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
5471 | ret = -ENOMEM; |
5472 | goto err; |
5473 | } |
5474 | |
5475 | hlist_for_each_entry(target_proc, &binder_procs, proc_node) { |
5476 | if (target_proc->pid != info.pid) |
5477 | continue; |
5478 | |
5479 | binder_inner_proc_lock(target_proc); |
5480 | target_proc->tmp_ref++; |
5481 | binder_inner_proc_unlock(target_proc); |
5482 | |
5483 | target_procs[i++] = target_proc; |
5484 | } |
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);
5496 | |
5497 | if (ret < 0) |
5498 | goto err; |
5499 | break; |
5500 | } |
5501 | case BINDER_GET_FROZEN_INFO: { |
5502 | struct binder_frozen_status_info info; |
5503 | |
		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
5514 | ret = -EFAULT; |
5515 | goto err; |
5516 | } |
5517 | break; |
5518 | } |
5519 | case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: { |
5520 | uint32_t enable; |
5521 | |
		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5523 | ret = -EFAULT; |
5524 | goto err; |
5525 | } |
5526 | binder_inner_proc_lock(proc); |
5527 | proc->oneway_spam_detection_enabled = (bool)enable; |
5528 | binder_inner_proc_unlock(proc); |
5529 | break; |
5530 | } |
5531 | case BINDER_GET_EXTENDED_ERROR: |
5532 | ret = binder_ioctl_get_extended_error(thread, ubuf); |
5533 | if (ret < 0) |
5534 | goto err; |
5535 | break; |
5536 | default: |
5537 | ret = -EINVAL; |
5538 | goto err; |
5539 | } |
5540 | ret = 0; |
5541 | err: |
5542 | if (thread) |
5543 | thread->looper_need_return = false; |
5544 | wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); |
5545 | if (ret && ret != -EINTR) |
5546 | pr_info("%d:%d ioctl %x %lx returned %d\n" , proc->pid, current->pid, cmd, arg, ret); |
5547 | err_unlocked: |
5548 | trace_binder_ioctl_done(ret); |
5549 | return ret; |
5550 | } |
5551 | |
5552 | static void binder_vma_open(struct vm_area_struct *vma) |
5553 | { |
5554 | struct binder_proc *proc = vma->vm_private_data; |
5555 | |
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5558 | proc->pid, vma->vm_start, vma->vm_end, |
5559 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, |
5560 | (unsigned long)pgprot_val(vma->vm_page_prot)); |
5561 | } |
5562 | |
5563 | static void binder_vma_close(struct vm_area_struct *vma) |
5564 | { |
5565 | struct binder_proc *proc = vma->vm_private_data; |
5566 | |
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
5573 | } |
5574 | |
5575 | static vm_fault_t binder_vm_fault(struct vm_fault *vmf) |
5576 | { |
5577 | return VM_FAULT_SIGBUS; |
5578 | } |
5579 | |
5580 | static const struct vm_operations_struct binder_vm_ops = { |
5581 | .open = binder_vma_open, |
5582 | .close = binder_vma_close, |
5583 | .fault = binder_vm_fault, |
5584 | }; |
5585 | |
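/**
 * binder_mmap() - map the binder buffer area into userspace
 *
 * Only the process's group leader may establish the mapping, and it is
 * read-only for userspace (VM_WRITE is forbidden and VM_MAYWRITE is
 * cleared); only the kernel copies transaction data into it. The actual
 * buffer setup is delegated to the binder_alloc layer, which permits a
 * single mapping per process.
 */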
5586 | static int binder_mmap(struct file *filp, struct vm_area_struct *vma) |
5587 | { |
5588 | struct binder_proc *proc = filp->private_data; |
5589 | |
5590 | if (proc->tsk != current->group_leader) |
5591 | return -EINVAL; |
5592 | |
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5595 | __func__, proc->pid, vma->vm_start, vma->vm_end, |
5596 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, |
5597 | (unsigned long)pgprot_val(vma->vm_page_prot)); |
5598 | |
5599 | if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { |
5600 | pr_err("%s: %d %lx-%lx %s failed %d\n" , __func__, |
5601 | proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags" , -EPERM); |
5602 | return -EPERM; |
5603 | } |
5604 | vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE); |
5605 | |
5606 | vma->vm_ops = &binder_vm_ops; |
5607 | vma->vm_private_data = proc; |
5608 | |
	return binder_alloc_mmap_handler(&proc->alloc, vma);
5610 | } |
5611 | |
5612 | static int binder_open(struct inode *nodp, struct file *filp) |
5613 | { |
5614 | struct binder_proc *proc, *itr; |
5615 | struct binder_device *binder_dev; |
5616 | struct binderfs_info *info; |
5617 | struct dentry *binder_binderfs_dir_entry_proc = NULL; |
5618 | bool existing_pid = false; |
5619 | |
	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5621 | current->group_leader->pid, current->pid); |
5622 | |
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5624 | if (proc == NULL) |
5625 | return -ENOMEM; |
5626 | spin_lock_init(&proc->inner_lock); |
5627 | spin_lock_init(&proc->outer_lock); |
5628 | get_task_struct(current->group_leader); |
5629 | proc->tsk = current->group_leader; |
	proc->cred = get_cred(filp->f_cred);
	INIT_LIST_HEAD(&proc->todo);
5632 | init_waitqueue_head(&proc->freeze_wait); |
5633 | proc->default_priority = task_nice(current); |
5634 | /* binderfs stashes devices in i_private */ |
	if (is_binderfs_device(nodp)) {
5636 | binder_dev = nodp->i_private; |
5637 | info = nodp->i_sb->s_fs_info; |
5638 | binder_binderfs_dir_entry_proc = info->proc_log_dir; |
5639 | } else { |
5640 | binder_dev = container_of(filp->private_data, |
5641 | struct binder_device, miscdev); |
5642 | } |
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
5651 | filp->private_data = proc; |
5652 | |
5653 | mutex_lock(&binder_procs_lock); |
5654 | hlist_for_each_entry(itr, &binder_procs, proc_node) { |
5655 | if (itr->pid == proc->pid) { |
5656 | existing_pid = true; |
5657 | break; |
5658 | } |
5659 | } |
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);
5662 | |
5663 | if (binder_debugfs_dir_entry_proc && !existing_pid) { |
5664 | char strbuf[11]; |
5665 | |
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5667 | /* |
5668 | * proc debug entries are shared between contexts. |
5669 | * Only create for the first PID to avoid debugfs log spamming |
5670 | * The printing code will anyway print all contexts for a given |
5671 | * PID so this is not a problem. |
5672 | */ |
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
5677 | } |
5678 | |
5679 | if (binder_binderfs_dir_entry_proc && !existing_pid) { |
5680 | char strbuf[11]; |
5681 | struct dentry *binderfs_entry; |
5682 | |
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5684 | /* |
5685 | * Similar to debugfs, the process specific log file is shared |
5686 | * between contexts. Only create for the first PID. |
5687 | * This is ok since same as debugfs, the log file will contain |
5688 | * information on all contexts of a given PID. |
5689 | */ |
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
						strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
5693 | proc->binderfs_entry = binderfs_entry; |
5694 | } else { |
5695 | int error; |
5696 | |
			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5699 | strbuf, error); |
5700 | } |
5701 | } |
5702 | |
5703 | return 0; |
5704 | } |
5705 | |
5706 | static int binder_flush(struct file *filp, fl_owner_t id) |
5707 | { |
5708 | struct binder_proc *proc = filp->private_data; |
5709 | |
	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5711 | |
5712 | return 0; |
5713 | } |
5714 | |
5715 | static void binder_deferred_flush(struct binder_proc *proc) |
5716 | { |
5717 | struct rb_node *n; |
5718 | int wake_count = 0; |
5719 | |
5720 | binder_inner_proc_lock(proc); |
5721 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { |
5722 | struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); |
5723 | |
5724 | thread->looper_need_return = true; |
5725 | if (thread->looper & BINDER_LOOPER_STATE_WAITING) { |
5726 | wake_up_interruptible(&thread->wait); |
5727 | wake_count++; |
5728 | } |
5729 | } |
5730 | binder_inner_proc_unlock(proc); |
5731 | |
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
5734 | wake_count); |
5735 | } |
5736 | |
5737 | static int binder_release(struct inode *nodp, struct file *filp) |
5738 | { |
5739 | struct binder_proc *proc = filp->private_data; |
5740 | |
	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
5745 | proc->binderfs_entry = NULL; |
5746 | } |
5747 | |
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5749 | |
5750 | return 0; |
5751 | } |
5752 | |
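/**
 * binder_node_release() - handle a node whose owning process died
 * @node: node being released; the caller holds a temporary ref
 * @refs: running count of incoming references, for the caller's stats
 *
 * A node with no remaining references is freed outright. Otherwise it
 * is moved to the global dead-nodes list and a BR_DEAD_BINDER work item
 * is queued for every process that requested a death notification.
 * Returns the updated reference count.
 */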
5753 | static int binder_node_release(struct binder_node *node, int refs) |
5754 | { |
5755 | struct binder_ref *ref; |
5756 | int death = 0; |
5757 | struct binder_proc *proc = node->proc; |
5758 | |
	binder_release_work(proc, &node->async_todo);
5760 | |
5761 | binder_node_lock(node); |
5762 | binder_inner_proc_lock(proc); |
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
5767 | BUG_ON(!node->tmp_refs); |
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5769 | binder_inner_proc_unlock(proc); |
5770 | binder_node_unlock(node); |
5771 | binder_free_node(node); |
5772 | |
5773 | return refs; |
5774 | } |
5775 | |
5776 | node->proc = NULL; |
5777 | node->local_strong_refs = 0; |
5778 | node->local_weak_refs = 0; |
5779 | binder_inner_proc_unlock(proc); |
5780 | |
	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);
5784 | |
5785 | hlist_for_each_entry(ref, &node->refs, node_entry) { |
5786 | refs++; |
5787 | /* |
5788 | * Need the node lock to synchronize |
5789 | * with new notification requests and the |
5790 | * inner lock to synchronize with queued |
5791 | * death notifications. |
5792 | */ |
5793 | binder_inner_proc_lock(ref->proc); |
5794 | if (!ref->death) { |
5795 | binder_inner_proc_unlock(ref->proc); |
5796 | continue; |
5797 | } |
5798 | |
5799 | death++; |
5800 | |
5801 | BUG_ON(!list_empty(&ref->death->work.entry)); |
5802 | ref->death->work.type = BINDER_WORK_DEAD_BINDER; |
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
5807 | } |
5808 | |
	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
5811 | node->debug_id, refs, death); |
5812 | binder_node_unlock(node); |
5813 | binder_put_node(node); |
5814 | |
5815 | return refs; |
5816 | } |
5817 | |
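/**
 * binder_deferred_release() - tear down a binder_proc after release()
 *
 * Runs from the deferred workqueue. Unpublishes the proc (and the
 * context manager node, if this process owned it), then releases all
 * threads, nodes, and references, and finally drains any undelivered
 * todo and delivered_death work before dropping the temporary ref that
 * keeps the proc alive during teardown.
 */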
5818 | static void binder_deferred_release(struct binder_proc *proc) |
5819 | { |
5820 | struct binder_context *context = proc->context; |
5821 | struct rb_node *n; |
5822 | int threads, nodes, incoming_refs, outgoing_refs, active_transactions; |
5823 | |
5824 | mutex_lock(&binder_procs_lock); |
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);
5827 | |
5828 | mutex_lock(&context->context_mgr_node_lock); |
5829 | if (context->binder_context_mgr_node && |
5830 | context->binder_context_mgr_node->proc == proc) { |
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
5833 | __func__, proc->pid); |
5834 | context->binder_context_mgr_node = NULL; |
5835 | } |
	mutex_unlock(&context->context_mgr_node_lock);
5837 | binder_inner_proc_lock(proc); |
5838 | /* |
5839 | * Make sure proc stays alive after we |
5840 | * remove all the threads |
5841 | */ |
5842 | proc->tmp_ref++; |
5843 | |
5844 | proc->is_dead = true; |
5845 | proc->is_frozen = false; |
5846 | proc->sync_recv = false; |
5847 | proc->async_recv = false; |
5848 | threads = 0; |
5849 | active_transactions = 0; |
5850 | while ((n = rb_first(&proc->threads))) { |
5851 | struct binder_thread *thread; |
5852 | |
5853 | thread = rb_entry(n, struct binder_thread, rb_node); |
5854 | binder_inner_proc_unlock(proc); |
5855 | threads++; |
5856 | active_transactions += binder_thread_release(proc, thread); |
5857 | binder_inner_proc_lock(proc); |
5858 | } |
5859 | |
5860 | nodes = 0; |
5861 | incoming_refs = 0; |
5862 | while ((n = rb_first(&proc->nodes))) { |
5863 | struct binder_node *node; |
5864 | |
5865 | node = rb_entry(n, struct binder_node, rb_node); |
5866 | nodes++; |
5867 | /* |
5868 | * take a temporary ref on the node before |
5869 | * calling binder_node_release() which will either |
5870 | * kfree() the node or call binder_put_node() |
5871 | */ |
5872 | binder_inc_node_tmpref_ilocked(node); |
5873 | rb_erase(&node->rb_node, &proc->nodes); |
5874 | binder_inner_proc_unlock(proc); |
		incoming_refs = binder_node_release(node, incoming_refs);
5876 | binder_inner_proc_lock(proc); |
5877 | } |
5878 | binder_inner_proc_unlock(proc); |
5879 | |
5880 | outgoing_refs = 0; |
5881 | binder_proc_lock(proc); |
5882 | while ((n = rb_first(&proc->refs_by_desc))) { |
5883 | struct binder_ref *ref; |
5884 | |
5885 | ref = rb_entry(n, struct binder_ref, rb_node_desc); |
5886 | outgoing_refs++; |
5887 | binder_cleanup_ref_olocked(ref); |
5888 | binder_proc_unlock(proc); |
5889 | binder_free_ref(ref); |
5890 | binder_proc_lock(proc); |
5891 | } |
5892 | binder_proc_unlock(proc); |
5893 | |
	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5899 | __func__, proc->pid, threads, nodes, incoming_refs, |
5900 | outgoing_refs, active_transactions); |
5901 | |
5902 | binder_proc_dec_tmpref(proc); |
5903 | } |
5904 | |
5905 | static void binder_deferred_func(struct work_struct *work) |
5906 | { |
5907 | struct binder_proc *proc; |
5908 | |
5909 | int defer; |
5910 | |
5911 | do { |
5912 | mutex_lock(&binder_deferred_lock); |
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
5917 | defer = proc->deferred_work; |
5918 | proc->deferred_work = 0; |
5919 | } else { |
5920 | proc = NULL; |
5921 | defer = 0; |
5922 | } |
		mutex_unlock(&binder_deferred_lock);
5924 | |
5925 | if (defer & BINDER_DEFERRED_FLUSH) |
5926 | binder_deferred_flush(proc); |
5927 | |
5928 | if (defer & BINDER_DEFERRED_RELEASE) |
5929 | binder_deferred_release(proc); /* frees proc */ |
5930 | } while (proc); |
5931 | } |
5932 | static DECLARE_WORK(binder_deferred_work, binder_deferred_func); |
5933 | |
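/**
 * binder_defer_work() - schedule deferred flush/release work for @proc
 *
 * Deferred work flags are ORed together under binder_deferred_lock; the
 * proc is added to the global deferred list only if it is not already
 * queued, so concurrent callers coalesce into a single workqueue pass.
 */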
5934 | static void |
5935 | binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) |
5936 | { |
5937 | mutex_lock(&binder_deferred_lock); |
5938 | proc->deferred_work |= defer; |
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
5945 | } |
5946 | |
5947 | static void print_binder_transaction_ilocked(struct seq_file *m, |
5948 | struct binder_proc *proc, |
5949 | const char *prefix, |
5950 | struct binder_transaction *t) |
5951 | { |
5952 | struct binder_proc *to_proc; |
5953 | struct binder_buffer *buffer = t->buffer; |
5954 | ktime_t current_time = ktime_get(); |
5955 | |
	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
5960 | prefix, t->debug_id, t, |
5961 | t->from_pid, |
5962 | t->from_tid, |
5963 | to_proc ? to_proc->pid : 0, |
5964 | t->to_thread ? t->to_thread->pid : 0, |
5965 | t->code, t->flags, t->priority, t->need_reply, |
		   ktime_ms_delta(current_time, t->start_time));
	spin_unlock(&t->lock);
5968 | |
5969 | if (proc != to_proc) { |
5970 | /* |
5971 | * Can only safely deref buffer if we are holding the |
5972 | * correct proc inner lock for this node |
5973 | */ |
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
5985 | buffer->data_size, buffer->offsets_size, |
5986 | buffer->user_data); |
5987 | } |
5988 | |
5989 | static void print_binder_work_ilocked(struct seq_file *m, |
5990 | struct binder_proc *proc, |
5991 | const char *prefix, |
5992 | const char *transaction_prefix, |
5993 | struct binder_work *w) |
5994 | { |
5995 | struct binder_node *node; |
5996 | struct binder_transaction *t; |
5997 | |
5998 | switch (w->type) { |
5999 | case BINDER_WORK_TRANSACTION: |
6000 | t = container_of(w, struct binder_transaction, work); |
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
6003 | break; |
6004 | case BINDER_WORK_RETURN_ERROR: { |
6005 | struct binder_error *e = container_of( |
6006 | w, struct binder_error, work); |
6007 | |
6008 | seq_printf(m, fmt: "%stransaction error: %u\n" , |
6009 | prefix, e->cmd); |
6010 | } break; |
6011 | case BINDER_WORK_TRANSACTION_COMPLETE: |
6012 | seq_printf(m, fmt: "%stransaction complete\n" , prefix); |
6013 | break; |
6014 | case BINDER_WORK_NODE: |
6015 | node = container_of(w, struct binder_node, work); |
6016 | seq_printf(m, fmt: "%snode work %d: u%016llx c%016llx\n" , |
6017 | prefix, node->debug_id, |
6018 | (u64)node->ptr, (u64)node->cookie); |
6019 | break; |
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
6032 | } |
6033 | } |
6034 | |
6035 | static void print_binder_thread_ilocked(struct seq_file *m, |
6036 | struct binder_thread *thread, |
6037 | int print_always) |
6038 | { |
6039 | struct binder_transaction *t; |
6040 | struct binder_work *w; |
6041 | size_t start_pos = m->count; |
	size_t header_pos;
6043 | |
6044 | seq_printf(m, fmt: " thread %d: l %02x need_return %d tr %d\n" , |
6045 | thread->pid, thread->looper, |
6046 | thread->looper_need_return, |
6047 | atomic_read(v: &thread->tmp_ref)); |
6048 | header_pos = m->count; |
6049 | t = thread->transaction_stack; |
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
6069 | if (!print_always && m->count == header_pos) |
6070 | m->count = start_pos; |
6071 | } |
6072 | |
6073 | static void print_binder_node_nilocked(struct seq_file *m, |
6074 | struct binder_node *node) |
6075 | { |
6076 | struct binder_ref *ref; |
6077 | struct binder_work *w; |
6078 | int count; |
6079 | |
6080 | count = 0; |
6081 | hlist_for_each_entry(ref, &node->refs, node_entry) |
6082 | count++; |
6083 | |
6084 | seq_printf(m, fmt: " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d" , |
6085 | node->debug_id, (u64)node->ptr, (u64)node->cookie, |
6086 | node->has_strong_ref, node->has_weak_ref, |
6087 | node->local_strong_refs, node->local_weak_refs, |
6088 | node->internal_strong_refs, count, node->tmp_refs); |
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
						  "    pending async transaction", w);
	}
6100 | } |
6101 | |
6102 | static void print_binder_ref_olocked(struct seq_file *m, |
6103 | struct binder_ref *ref) |
6104 | { |
6105 | binder_node_lock(ref->node); |
6106 | seq_printf(m, fmt: " ref %d: desc %d %snode %d s %d w %d d %pK\n" , |
6107 | ref->data.debug_id, ref->data.desc, |
6108 | ref->node->proc ? "" : "dead " , |
6109 | ref->node->debug_id, ref->data.strong, |
6110 | ref->data.weak, ref->death); |
6111 | binder_node_unlock(ref->node); |
6112 | } |
6113 | |
6114 | static void print_binder_proc(struct seq_file *m, |
6115 | struct binder_proc *proc, int print_all) |
6116 | { |
6117 | struct binder_work *w; |
6118 | struct rb_node *n; |
6119 | size_t start_pos = m->count; |
	size_t header_pos;
6121 | struct binder_node *last_node = NULL; |
6122 | |
6123 | seq_printf(m, fmt: "proc %d\n" , proc->pid); |
6124 | seq_printf(m, fmt: "context %s\n" , proc->context->name); |
6125 | header_pos = m->count; |
6126 | |
6127 | binder_inner_proc_lock(proc); |
6128 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) |
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
6131 | |
6132 | for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { |
6133 | struct binder_node *node = rb_entry(n, struct binder_node, |
6134 | rb_node); |
6135 | if (!print_all && !node->has_async_transaction) |
6136 | continue; |
6137 | |
6138 | /* |
6139 | * take a temporary reference on the node so it |
6140 | * survives and isn't removed from the tree |
6141 | * while we print it. |
6142 | */ |
6143 | binder_inc_node_tmpref_ilocked(node); |
6144 | /* Need to drop inner lock to take node lock */ |
6145 | binder_inner_proc_unlock(proc); |
6146 | if (last_node) |
			binder_put_node(last_node);
6148 | binder_node_inner_lock(node); |
6149 | print_binder_node_nilocked(m, node); |
6150 | binder_node_inner_unlock(node); |
6151 | last_node = node; |
6152 | binder_inner_proc_lock(proc); |
6153 | } |
6154 | binder_inner_proc_unlock(proc); |
6155 | if (last_node) |
		binder_put_node(last_node);
6157 | |
6158 | if (print_all) { |
6159 | binder_proc_lock(proc); |
6160 | for (n = rb_first(&proc->refs_by_desc); |
6161 | n != NULL; |
6162 | n = rb_next(n)) |
6163 | print_binder_ref_olocked(m, rb_entry(n, |
6164 | struct binder_ref, |
6165 | rb_node_desc)); |
6166 | binder_proc_unlock(proc); |
6167 | } |
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
6175 | break; |
6176 | } |
6177 | binder_inner_proc_unlock(proc); |
6178 | if (!print_all && m->count == header_pos) |
6179 | m->count = start_pos; |
6180 | } |
6181 | |
6182 | static const char * const binder_return_strings[] = { |
6183 | "BR_ERROR" , |
6184 | "BR_OK" , |
6185 | "BR_TRANSACTION" , |
6186 | "BR_REPLY" , |
6187 | "BR_ACQUIRE_RESULT" , |
6188 | "BR_DEAD_REPLY" , |
6189 | "BR_TRANSACTION_COMPLETE" , |
6190 | "BR_INCREFS" , |
6191 | "BR_ACQUIRE" , |
6192 | "BR_RELEASE" , |
6193 | "BR_DECREFS" , |
6194 | "BR_ATTEMPT_ACQUIRE" , |
6195 | "BR_NOOP" , |
6196 | "BR_SPAWN_LOOPER" , |
6197 | "BR_FINISHED" , |
6198 | "BR_DEAD_BINDER" , |
6199 | "BR_CLEAR_DEATH_NOTIFICATION_DONE" , |
6200 | "BR_FAILED_REPLY" , |
6201 | "BR_FROZEN_REPLY" , |
6202 | "BR_ONEWAY_SPAM_SUSPECT" , |
6203 | "BR_TRANSACTION_PENDING_FROZEN" |
6204 | }; |
6205 | |
6206 | static const char * const binder_command_strings[] = { |
6207 | "BC_TRANSACTION" , |
6208 | "BC_REPLY" , |
6209 | "BC_ACQUIRE_RESULT" , |
6210 | "BC_FREE_BUFFER" , |
6211 | "BC_INCREFS" , |
6212 | "BC_ACQUIRE" , |
6213 | "BC_RELEASE" , |
6214 | "BC_DECREFS" , |
6215 | "BC_INCREFS_DONE" , |
6216 | "BC_ACQUIRE_DONE" , |
6217 | "BC_ATTEMPT_ACQUIRE" , |
6218 | "BC_REGISTER_LOOPER" , |
6219 | "BC_ENTER_LOOPER" , |
6220 | "BC_EXIT_LOOPER" , |
6221 | "BC_REQUEST_DEATH_NOTIFICATION" , |
6222 | "BC_CLEAR_DEATH_NOTIFICATION" , |
6223 | "BC_DEAD_BINDER_DONE" , |
6224 | "BC_TRANSACTION_SG" , |
6225 | "BC_REPLY_SG" , |
6226 | }; |
6227 | |
6228 | static const char * const binder_objstat_strings[] = { |
6229 | "proc" , |
6230 | "thread" , |
6231 | "node" , |
6232 | "ref" , |
6233 | "death" , |
6234 | "transaction" , |
6235 | "transaction_complete" |
6236 | }; |
6237 | |
6238 | static void print_binder_stats(struct seq_file *m, const char *prefix, |
6239 | struct binder_stats *stats) |
6240 | { |
6241 | int i; |
6242 | |
6243 | BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != |
6244 | ARRAY_SIZE(binder_command_strings)); |
6245 | for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { |
6246 | int temp = atomic_read(v: &stats->bc[i]); |
6247 | |
6248 | if (temp) |
6249 | seq_printf(m, fmt: "%s%s: %d\n" , prefix, |
6250 | binder_command_strings[i], temp); |
6251 | } |
6252 | |
6253 | BUILD_BUG_ON(ARRAY_SIZE(stats->br) != |
6254 | ARRAY_SIZE(binder_return_strings)); |
6255 | for (i = 0; i < ARRAY_SIZE(stats->br); i++) { |
6256 | int temp = atomic_read(v: &stats->br[i]); |
6257 | |
6258 | if (temp) |
6259 | seq_printf(m, fmt: "%s%s: %d\n" , prefix, |
6260 | binder_return_strings[i], temp); |
6261 | } |
6262 | |
6263 | BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != |
6264 | ARRAY_SIZE(binder_objstat_strings)); |
6265 | BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != |
6266 | ARRAY_SIZE(stats->obj_deleted)); |
6267 | for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { |
6268 | int created = atomic_read(v: &stats->obj_created[i]); |
6269 | int deleted = atomic_read(v: &stats->obj_deleted[i]); |
6270 | |
6271 | if (created || deleted) |
6272 | seq_printf(m, fmt: "%s%s: active %d total %d\n" , |
6273 | prefix, |
6274 | binder_objstat_strings[i], |
6275 | created - deleted, |
6276 | created); |
6277 | } |
6278 | } |
6279 | |
6280 | static void print_binder_proc_stats(struct seq_file *m, |
6281 | struct binder_proc *proc) |
6282 | { |
6283 | struct binder_work *w; |
6284 | struct binder_thread *thread; |
6285 | struct rb_node *n; |
6286 | int count, strong, weak, ready_threads; |
6287 | size_t free_async_space = |
6288 | binder_alloc_get_free_async_space(alloc: &proc->alloc); |
6289 | |
6290 | seq_printf(m, fmt: "proc %d\n" , proc->pid); |
6291 | seq_printf(m, fmt: "context %s\n" , proc->context->name); |
6292 | count = 0; |
6293 | ready_threads = 0; |
6294 | binder_inner_proc_lock(proc); |
6295 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) |
6296 | count++; |
6297 | |
6298 | list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) |
6299 | ready_threads++; |
6300 | |
6301 | seq_printf(m, fmt: " threads: %d\n" , count); |
6302 | seq_printf(m, fmt: " requested threads: %d+%d/%d\n" |
6303 | " ready threads %d\n" |
6304 | " free async space %zd\n" , proc->requested_threads, |
6305 | proc->requested_threads_started, proc->max_threads, |
6306 | ready_threads, |
6307 | free_async_space); |
6308 | count = 0; |
6309 | for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) |
6310 | count++; |
6311 | binder_inner_proc_unlock(proc); |
6312 | seq_printf(m, fmt: " nodes: %d\n" , count); |
6313 | count = 0; |
6314 | strong = 0; |
6315 | weak = 0; |
6316 | binder_proc_lock(proc); |
6317 | for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { |
6318 | struct binder_ref *ref = rb_entry(n, struct binder_ref, |
6319 | rb_node_desc); |
6320 | count++; |
6321 | strong += ref->data.strong; |
6322 | weak += ref->data.weak; |
6323 | } |
6324 | binder_proc_unlock(proc); |
6325 | seq_printf(m, fmt: " refs: %d s %d w %d\n" , count, strong, weak); |
6326 | |
6327 | count = binder_alloc_get_allocated_count(alloc: &proc->alloc); |
6328 | seq_printf(m, fmt: " buffers: %d\n" , count); |
6329 | |
6330 | binder_alloc_print_pages(m, alloc: &proc->alloc); |
6331 | |
6332 | count = 0; |
6333 | binder_inner_proc_lock(proc); |
6334 | list_for_each_entry(w, &proc->todo, entry) { |
6335 | if (w->type == BINDER_WORK_TRANSACTION) |
6336 | count++; |
6337 | } |
6338 | binder_inner_proc_unlock(proc); |
6339 | seq_printf(m, fmt: " pending transactions: %d\n" , count); |
6340 | |
6341 | print_binder_stats(m, prefix: " " , stats: &proc->stats); |
6342 | } |
6343 | |
6344 | static int state_show(struct seq_file *m, void *unused) |
6345 | { |
6346 | struct binder_proc *proc; |
6347 | struct binder_node *node; |
6348 | struct binder_node *last_node = NULL; |
6349 | |
6350 | seq_puts(m, s: "binder state:\n" ); |
6351 | |
6352 | spin_lock(lock: &binder_dead_nodes_lock); |
6353 | if (!hlist_empty(h: &binder_dead_nodes)) |
6354 | seq_puts(m, s: "dead nodes:\n" ); |
6355 | hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { |
6356 | /* |
6357 | * take a temporary reference on the node so it |
6358 | * survives and isn't removed from the list |
6359 | * while we print it. |
6360 | */ |
6361 | node->tmp_refs++; |
6362 | spin_unlock(lock: &binder_dead_nodes_lock); |
6363 | if (last_node) |
6364 | binder_put_node(node: last_node); |
6365 | binder_node_lock(node); |
6366 | print_binder_node_nilocked(m, node); |
6367 | binder_node_unlock(node); |
6368 | last_node = node; |
6369 | spin_lock(lock: &binder_dead_nodes_lock); |
6370 | } |
6371 | spin_unlock(lock: &binder_dead_nodes_lock); |
6372 | if (last_node) |
6373 | binder_put_node(node: last_node); |
6374 | |
6375 | mutex_lock(&binder_procs_lock); |
6376 | hlist_for_each_entry(proc, &binder_procs, proc_node) |
6377 | print_binder_proc(m, proc, print_all: 1); |
6378 | mutex_unlock(lock: &binder_procs_lock); |
6379 | |
6380 | return 0; |
6381 | } |
6382 | |
6383 | static int stats_show(struct seq_file *m, void *unused) |
6384 | { |
6385 | struct binder_proc *proc; |
6386 | |
6387 | seq_puts(m, s: "binder stats:\n" ); |
6388 | |
6389 | print_binder_stats(m, prefix: "" , stats: &binder_stats); |
6390 | |
6391 | mutex_lock(&binder_procs_lock); |
6392 | hlist_for_each_entry(proc, &binder_procs, proc_node) |
6393 | print_binder_proc_stats(m, proc); |
6394 | mutex_unlock(lock: &binder_procs_lock); |
6395 | |
6396 | return 0; |
6397 | } |
6398 | |
6399 | static int transactions_show(struct seq_file *m, void *unused) |
6400 | { |
6401 | struct binder_proc *proc; |
6402 | |
6403 | seq_puts(m, s: "binder transactions:\n" ); |
6404 | mutex_lock(&binder_procs_lock); |
6405 | hlist_for_each_entry(proc, &binder_procs, proc_node) |
6406 | print_binder_proc(m, proc, print_all: 0); |
6407 | mutex_unlock(lock: &binder_procs_lock); |
6408 | |
6409 | return 0; |
6410 | } |
6411 | |
6412 | static int proc_show(struct seq_file *m, void *unused) |
6413 | { |
6414 | struct binder_proc *itr; |
6415 | int pid = (unsigned long)m->private; |
6416 | |
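	/*
	 * A process may have opened more than one binder context, each
	 * with its own binder_proc, so walk the whole list and print
	 * every match rather than stopping at the first one.
	 */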
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

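	/*
	 * log->cur is the index of the most recently written entry.
	 * Before the ring buffer first wraps (!log->full), the oldest
	 * entry is slot 0; after that, it is the slot just past cur
	 * and all ARRAY_SIZE(log->entry) slots are valid.
	 */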
	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

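/*
 * The binder ioctl ABI uses fixed-size types throughout, so 32-bit
 * compat handling needs only the pointer translation provided by the
 * generic compat_ptr_ioctl() helper.
 */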
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{
		.name = "state",
		.mode = 0444,
		.fops = &state_fops,
		.data = NULL,
	},
	{
		.name = "stats",
		.mode = 0444,
		.fops = &stats_fops,
		.data = NULL,
	},
	{
		.name = "transactions",
		.mode = 0444,
		.fops = &transactions_fops,
		.data = NULL,
	},
	{
		.name = "transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log,
	},
	{
		.name = "failed_transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log_failed,
	},
	{} /* terminator */
};
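
/*
 * With debugfs mounted at its conventional /sys/kernel/debug, the
 * entries above show up as /sys/kernel/debug/binder/state, stats,
 * transactions, transaction_log and failed_transaction_log, next to
 * the per-process files created under the "proc" subdirectory, e.g.:
 *
 *	cat /sys/kernel/debug/binder/state
 */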

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;
	const struct binder_debugfs_entry *db_entry;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);

	binder_for_each_debugfs_entry(db_entry)
		debugfs_create_file(db_entry->name,
				    db_entry->mode,
				    binder_debugfs_dir_entry_root,
				    db_entry->data,
				    db_entry->fops);

	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);

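	/*
	 * When binderfs is enabled, device nodes are created on demand
	 * by the filesystem, so the misc devices below are registered
	 * only for the legacy, non-binderfs configuration.
	 */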
	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	binder_alloc_shrinker_exit();

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");