blob: 4473adef2f5a49b798ed158d659617ca05548a67 [file] [log] [blame]
David Brazdil0f672f62019-12-10 10:32:29 +00001// SPDX-License-Identifier: GPL-2.0-only
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002/* binder.c
3 *
4 * Android IPC Subsystem
5 *
6 * Copyright (C) 2007-2008 Google, Inc.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007 */
8
9/*
10 * Locking overview
11 *
12 * There are 3 main spinlocks which must be acquired in the
13 * order shown:
14 *
15 * 1) proc->outer_lock : protects binder_ref
16 * binder_proc_lock() and binder_proc_unlock() are
17 * used to acq/rel.
18 * 2) node->lock : protects most fields of binder_node.
19 * binder_node_lock() and binder_node_unlock() are
20 * used to acq/rel
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
23 * and all todo lists associated with the binder_proc
24 * (proc->todo, thread->todo, proc->delivered_death and
25 * node->async_todo), as well as thread->transaction_stack
26 * binder_inner_proc_lock() and binder_inner_proc_unlock()
27 * are used to acq/rel
28 *
29 * Any lock under procA must never be nested under any lock at the same
30 * level or below on procB.
31 *
32 * Functions that require a lock held on entry indicate which lock
33 * in the suffix of the function name:
34 *
35 * foo_olocked() : requires node->outer_lock
36 * foo_nlocked() : requires node->lock
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
40 * ...
41 */
42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45#include <linux/fdtable.h>
46#include <linux/file.h>
47#include <linux/freezer.h>
48#include <linux/fs.h>
49#include <linux/list.h>
50#include <linux/miscdevice.h>
51#include <linux/module.h>
52#include <linux/mutex.h>
53#include <linux/nsproxy.h>
54#include <linux/poll.h>
55#include <linux/debugfs.h>
56#include <linux/rbtree.h>
57#include <linux/sched/signal.h>
58#include <linux/sched/mm.h>
59#include <linux/seq_file.h>
David Brazdil0f672f62019-12-10 10:32:29 +000060#include <linux/string.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000061#include <linux/uaccess.h>
62#include <linux/pid_namespace.h>
63#include <linux/security.h>
64#include <linux/spinlock.h>
65#include <linux/ratelimit.h>
David Brazdil0f672f62019-12-10 10:32:29 +000066#include <linux/syscalls.h>
67#include <linux/task_work.h>
Olivier Deprez157378f2022-04-04 15:47:50 +020068#include <linux/sizes.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000069
70#include <uapi/linux/android/binder.h>
David Brazdil0f672f62019-12-10 10:32:29 +000071#include <uapi/linux/android/binderfs.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000072
73#include <asm/cacheflush.h>
74
75#include "binder_alloc.h"
David Brazdil0f672f62019-12-10 10:32:29 +000076#include "binder_internal.h"
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000077#include "binder_trace.h"
78
79static HLIST_HEAD(binder_deferred_list);
80static DEFINE_MUTEX(binder_deferred_lock);
81
82static HLIST_HEAD(binder_devices);
83static HLIST_HEAD(binder_procs);
84static DEFINE_MUTEX(binder_procs_lock);
85
86static HLIST_HEAD(binder_dead_nodes);
87static DEFINE_SPINLOCK(binder_dead_nodes_lock);
88
89static struct dentry *binder_debugfs_dir_entry_root;
90static struct dentry *binder_debugfs_dir_entry_proc;
91static atomic_t binder_last_id;
92
David Brazdil0f672f62019-12-10 10:32:29 +000093static int proc_show(struct seq_file *m, void *unused);
94DEFINE_SHOW_ATTRIBUTE(proc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000095
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000096#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
97
98enum {
99 BINDER_DEBUG_USER_ERROR = 1U << 0,
100 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
101 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
102 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
103 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
104 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
105 BINDER_DEBUG_READ_WRITE = 1U << 6,
106 BINDER_DEBUG_USER_REFS = 1U << 7,
107 BINDER_DEBUG_THREADS = 1U << 8,
108 BINDER_DEBUG_TRANSACTION = 1U << 9,
109 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
110 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
111 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
112 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
113 BINDER_DEBUG_SPINLOCKS = 1U << 14,
114};
115static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
116 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
117module_param_named(debug_mask, binder_debug_mask, uint, 0644);
118
David Brazdil0f672f62019-12-10 10:32:29 +0000119char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000120module_param_named(devices, binder_devices_param, charp, 0444);
121
122static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
123static int binder_stop_on_user_error;
124
125static int binder_set_stop_on_user_error(const char *val,
126 const struct kernel_param *kp)
127{
128 int ret;
129
130 ret = param_set_int(val, kp);
131 if (binder_stop_on_user_error < 2)
132 wake_up(&binder_user_error_wait);
133 return ret;
134}
135module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
136 param_get_int, &binder_stop_on_user_error, 0644);
137
138#define binder_debug(mask, x...) \
139 do { \
140 if (binder_debug_mask & mask) \
141 pr_info_ratelimited(x); \
142 } while (0)
143
144#define binder_user_error(x...) \
145 do { \
146 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
147 pr_info_ratelimited(x); \
148 if (binder_stop_on_user_error) \
149 binder_stop_on_user_error = 2; \
150 } while (0)
151
152#define to_flat_binder_object(hdr) \
153 container_of(hdr, struct flat_binder_object, hdr)
154
155#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
156
157#define to_binder_buffer_object(hdr) \
158 container_of(hdr, struct binder_buffer_object, hdr)
159
160#define to_binder_fd_array_object(hdr) \
161 container_of(hdr, struct binder_fd_array_object, hdr)
162
163enum binder_stat_types {
164 BINDER_STAT_PROC,
165 BINDER_STAT_THREAD,
166 BINDER_STAT_NODE,
167 BINDER_STAT_REF,
168 BINDER_STAT_DEATH,
169 BINDER_STAT_TRANSACTION,
170 BINDER_STAT_TRANSACTION_COMPLETE,
171 BINDER_STAT_COUNT
172};
173
174struct binder_stats {
175 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
176 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
177 atomic_t obj_created[BINDER_STAT_COUNT];
178 atomic_t obj_deleted[BINDER_STAT_COUNT];
179};
180
181static struct binder_stats binder_stats;
182
183static inline void binder_stats_deleted(enum binder_stat_types type)
184{
185 atomic_inc(&binder_stats.obj_deleted[type]);
186}
187
188static inline void binder_stats_created(enum binder_stat_types type)
189{
190 atomic_inc(&binder_stats.obj_created[type]);
191}
192
David Brazdil0f672f62019-12-10 10:32:29 +0000193struct binder_transaction_log binder_transaction_log;
194struct binder_transaction_log binder_transaction_log_failed;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000195
196static struct binder_transaction_log_entry *binder_transaction_log_add(
197 struct binder_transaction_log *log)
198{
199 struct binder_transaction_log_entry *e;
200 unsigned int cur = atomic_inc_return(&log->cur);
201
202 if (cur >= ARRAY_SIZE(log->entry))
203 log->full = true;
204 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
205 WRITE_ONCE(e->debug_id_done, 0);
206 /*
207 * write-barrier to synchronize access to e->debug_id_done.
208 * We make sure the initialized 0 value is seen before
209 * memset() other fields are zeroed by memset.
210 */
211 smp_wmb();
212 memset(e, 0, sizeof(*e));
213 return e;
214}
215
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000216/**
217 * struct binder_work - work enqueued on a worklist
218 * @entry: node enqueued on list
219 * @type: type of work to be performed
220 *
221 * There are separate work lists for proc, thread, and node (async).
222 */
223struct binder_work {
224 struct list_head entry;
225
Olivier Deprez0e641232021-09-23 10:07:05 +0200226 enum binder_work_type {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000227 BINDER_WORK_TRANSACTION = 1,
228 BINDER_WORK_TRANSACTION_COMPLETE,
229 BINDER_WORK_RETURN_ERROR,
230 BINDER_WORK_NODE,
231 BINDER_WORK_DEAD_BINDER,
232 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
233 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
234 } type;
235};
236
237struct binder_error {
238 struct binder_work work;
239 uint32_t cmd;
240};
241
242/**
243 * struct binder_node - binder node bookkeeping
244 * @debug_id: unique ID for debugging
245 * (invariant after initialized)
246 * @lock: lock for node fields
247 * @work: worklist element for node work
248 * (protected by @proc->inner_lock)
249 * @rb_node: element for proc->nodes tree
250 * (protected by @proc->inner_lock)
251 * @dead_node: element for binder_dead_nodes list
252 * (protected by binder_dead_nodes_lock)
253 * @proc: binder_proc that owns this node
254 * (invariant after initialized)
255 * @refs: list of references on this node
256 * (protected by @lock)
257 * @internal_strong_refs: used to take strong references when
258 * initiating a transaction
259 * (protected by @proc->inner_lock if @proc
260 * and by @lock)
261 * @local_weak_refs: weak user refs from local process
262 * (protected by @proc->inner_lock if @proc
263 * and by @lock)
264 * @local_strong_refs: strong user refs from local process
265 * (protected by @proc->inner_lock if @proc
266 * and by @lock)
267 * @tmp_refs: temporary kernel refs
268 * (protected by @proc->inner_lock while @proc
269 * is valid, and by binder_dead_nodes_lock
270 * if @proc is NULL. During inc/dec and node release
271 * it is also protected by @lock to provide safety
272 * as the node dies and @proc becomes NULL)
273 * @ptr: userspace pointer for node
274 * (invariant, no lock needed)
275 * @cookie: userspace cookie for node
276 * (invariant, no lock needed)
277 * @has_strong_ref: userspace notified of strong ref
278 * (protected by @proc->inner_lock if @proc
279 * and by @lock)
280 * @pending_strong_ref: userspace has acked notification of strong ref
281 * (protected by @proc->inner_lock if @proc
282 * and by @lock)
283 * @has_weak_ref: userspace notified of weak ref
284 * (protected by @proc->inner_lock if @proc
285 * and by @lock)
286 * @pending_weak_ref: userspace has acked notification of weak ref
287 * (protected by @proc->inner_lock if @proc
288 * and by @lock)
289 * @has_async_transaction: async transaction to node in progress
290 * (protected by @lock)
291 * @accept_fds: file descriptor operations supported for node
292 * (invariant after initialized)
293 * @min_priority: minimum scheduling priority
294 * (invariant after initialized)
David Brazdil0f672f62019-12-10 10:32:29 +0000295 * @txn_security_ctx: require sender's security context
296 * (invariant after initialized)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000297 * @async_todo: list of async work items
298 * (protected by @proc->inner_lock)
299 *
300 * Bookkeeping structure for binder nodes.
301 */
302struct binder_node {
303 int debug_id;
304 spinlock_t lock;
305 struct binder_work work;
306 union {
307 struct rb_node rb_node;
308 struct hlist_node dead_node;
309 };
310 struct binder_proc *proc;
311 struct hlist_head refs;
312 int internal_strong_refs;
313 int local_weak_refs;
314 int local_strong_refs;
315 int tmp_refs;
316 binder_uintptr_t ptr;
317 binder_uintptr_t cookie;
318 struct {
319 /*
320 * bitfield elements protected by
321 * proc inner_lock
322 */
323 u8 has_strong_ref:1;
324 u8 pending_strong_ref:1;
325 u8 has_weak_ref:1;
326 u8 pending_weak_ref:1;
327 };
328 struct {
329 /*
330 * invariant after initialization
331 */
332 u8 accept_fds:1;
David Brazdil0f672f62019-12-10 10:32:29 +0000333 u8 txn_security_ctx:1;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000334 u8 min_priority;
335 };
336 bool has_async_transaction;
337 struct list_head async_todo;
338};
339
340struct binder_ref_death {
341 /**
342 * @work: worklist element for death notifications
343 * (protected by inner_lock of the proc that
344 * this ref belongs to)
345 */
346 struct binder_work work;
347 binder_uintptr_t cookie;
348};
349
350/**
351 * struct binder_ref_data - binder_ref counts and id
352 * @debug_id: unique ID for the ref
353 * @desc: unique userspace handle for ref
354 * @strong: strong ref count (debugging only if not locked)
355 * @weak: weak ref count (debugging only if not locked)
356 *
357 * Structure to hold ref count and ref id information. Since
358 * the actual ref can only be accessed with a lock, this structure
359 * is used to return information about the ref to callers of
360 * ref inc/dec functions.
361 */
362struct binder_ref_data {
363 int debug_id;
364 uint32_t desc;
365 int strong;
366 int weak;
367};
368
369/**
370 * struct binder_ref - struct to track references on nodes
371 * @data: binder_ref_data containing id, handle, and current refcounts
372 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
373 * @rb_node_node: node for lookup by @node in proc's rb_tree
374 * @node_entry: list entry for node->refs list in target node
375 * (protected by @node->lock)
376 * @proc: binder_proc containing ref
377 * @node: binder_node of target node. When cleaning up a
378 * ref for deletion in binder_cleanup_ref, a non-NULL
379 * @node indicates the node must be freed
380 * @death: pointer to death notification (ref_death) if requested
381 * (protected by @node->lock)
382 *
383 * Structure to track references from procA to target node (on procB). This
384 * structure is unsafe to access without holding @proc->outer_lock.
385 */
386struct binder_ref {
387 /* Lookups needed: */
388 /* node + proc => ref (transaction) */
389 /* desc + proc => ref (transaction, inc/dec ref) */
390 /* node => refs + procs (proc exit) */
391 struct binder_ref_data data;
392 struct rb_node rb_node_desc;
393 struct rb_node rb_node_node;
394 struct hlist_node node_entry;
395 struct binder_proc *proc;
396 struct binder_node *node;
397 struct binder_ref_death *death;
398};
399
400enum binder_deferred_state {
David Brazdil0f672f62019-12-10 10:32:29 +0000401 BINDER_DEFERRED_FLUSH = 0x01,
402 BINDER_DEFERRED_RELEASE = 0x02,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000403};
404
405/**
406 * struct binder_proc - binder process bookkeeping
407 * @proc_node: element for binder_procs list
408 * @threads: rbtree of binder_threads in this proc
409 * (protected by @inner_lock)
410 * @nodes: rbtree of binder nodes associated with
411 * this proc ordered by node->ptr
412 * (protected by @inner_lock)
413 * @refs_by_desc: rbtree of refs ordered by ref->desc
414 * (protected by @outer_lock)
415 * @refs_by_node: rbtree of refs ordered by ref->node
416 * (protected by @outer_lock)
417 * @waiting_threads: threads currently waiting for proc work
418 * (protected by @inner_lock)
419 * @pid PID of group_leader of process
420 * (invariant after initialized)
421 * @tsk task_struct for group_leader of process
422 * (invariant after initialized)
Olivier Deprez157378f2022-04-04 15:47:50 +0200423 * @cred struct cred associated with the `struct file`
424 * in binder_open()
425 * (invariant after initialized)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000426 * @deferred_work_node: element for binder_deferred_list
427 * (protected by binder_deferred_lock)
428 * @deferred_work: bitmap of deferred work to perform
429 * (protected by binder_deferred_lock)
430 * @is_dead: process is dead and awaiting free
431 * when outstanding transactions are cleaned up
432 * (protected by @inner_lock)
433 * @todo: list of work for this process
434 * (protected by @inner_lock)
435 * @stats: per-process binder statistics
436 * (atomics, no lock needed)
437 * @delivered_death: list of delivered death notification
438 * (protected by @inner_lock)
439 * @max_threads: cap on number of binder threads
440 * (protected by @inner_lock)
441 * @requested_threads: number of binder threads requested but not
442 * yet started. In current implementation, can
443 * only be 0 or 1.
444 * (protected by @inner_lock)
445 * @requested_threads_started: number binder threads started
446 * (protected by @inner_lock)
447 * @tmp_ref: temporary reference to indicate proc is in use
448 * (protected by @inner_lock)
449 * @default_priority: default scheduler priority
450 * (invariant after initialized)
451 * @debugfs_entry: debugfs node
452 * @alloc: binder allocator bookkeeping
453 * @context: binder_context for this proc
454 * (invariant after initialized)
455 * @inner_lock: can nest under outer_lock and/or node lock
456 * @outer_lock: no nesting under innor or node lock
457 * Lock order: 1) outer, 2) node, 3) inner
David Brazdil0f672f62019-12-10 10:32:29 +0000458 * @binderfs_entry: process-specific binderfs log file
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000459 *
460 * Bookkeeping structure for binder processes
461 */
462struct binder_proc {
463 struct hlist_node proc_node;
464 struct rb_root threads;
465 struct rb_root nodes;
466 struct rb_root refs_by_desc;
467 struct rb_root refs_by_node;
468 struct list_head waiting_threads;
469 int pid;
470 struct task_struct *tsk;
Olivier Deprez157378f2022-04-04 15:47:50 +0200471 const struct cred *cred;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000472 struct hlist_node deferred_work_node;
473 int deferred_work;
474 bool is_dead;
475
476 struct list_head todo;
477 struct binder_stats stats;
478 struct list_head delivered_death;
479 int max_threads;
480 int requested_threads;
481 int requested_threads_started;
482 int tmp_ref;
483 long default_priority;
484 struct dentry *debugfs_entry;
485 struct binder_alloc alloc;
486 struct binder_context *context;
487 spinlock_t inner_lock;
488 spinlock_t outer_lock;
David Brazdil0f672f62019-12-10 10:32:29 +0000489 struct dentry *binderfs_entry;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000490};
491
492enum {
493 BINDER_LOOPER_STATE_REGISTERED = 0x01,
494 BINDER_LOOPER_STATE_ENTERED = 0x02,
495 BINDER_LOOPER_STATE_EXITED = 0x04,
496 BINDER_LOOPER_STATE_INVALID = 0x08,
497 BINDER_LOOPER_STATE_WAITING = 0x10,
498 BINDER_LOOPER_STATE_POLL = 0x20,
499};
500
501/**
502 * struct binder_thread - binder thread bookkeeping
503 * @proc: binder process for this thread
504 * (invariant after initialization)
505 * @rb_node: element for proc->threads rbtree
506 * (protected by @proc->inner_lock)
507 * @waiting_thread_node: element for @proc->waiting_threads list
508 * (protected by @proc->inner_lock)
509 * @pid: PID for this thread
510 * (invariant after initialization)
511 * @looper: bitmap of looping state
512 * (only accessed by this thread)
513 * @looper_needs_return: looping thread needs to exit driver
514 * (no lock needed)
515 * @transaction_stack: stack of in-progress transactions for this thread
516 * (protected by @proc->inner_lock)
517 * @todo: list of work to do for this thread
518 * (protected by @proc->inner_lock)
519 * @process_todo: whether work in @todo should be processed
520 * (protected by @proc->inner_lock)
521 * @return_error: transaction errors reported by this thread
522 * (only accessed by this thread)
523 * @reply_error: transaction errors reported by target thread
524 * (protected by @proc->inner_lock)
525 * @wait: wait queue for thread work
526 * @stats: per-thread statistics
527 * (atomics, no lock needed)
528 * @tmp_ref: temporary reference to indicate thread is in use
529 * (atomic since @proc->inner_lock cannot
530 * always be acquired)
531 * @is_dead: thread is dead and awaiting free
532 * when outstanding transactions are cleaned up
533 * (protected by @proc->inner_lock)
534 *
535 * Bookkeeping structure for binder threads.
536 */
537struct binder_thread {
538 struct binder_proc *proc;
539 struct rb_node rb_node;
540 struct list_head waiting_thread_node;
541 int pid;
542 int looper; /* only modified by this thread */
543 bool looper_need_return; /* can be written by other thread */
544 struct binder_transaction *transaction_stack;
545 struct list_head todo;
546 bool process_todo;
547 struct binder_error return_error;
548 struct binder_error reply_error;
549 wait_queue_head_t wait;
550 struct binder_stats stats;
551 atomic_t tmp_ref;
552 bool is_dead;
553};
554
David Brazdil0f672f62019-12-10 10:32:29 +0000555/**
556 * struct binder_txn_fd_fixup - transaction fd fixup list element
557 * @fixup_entry: list entry
558 * @file: struct file to be associated with new fd
559 * @offset: offset in buffer data to this fixup
560 *
561 * List element for fd fixups in a transaction. Since file
562 * descriptors need to be allocated in the context of the
563 * target process, we pass each fd to be processed in this
564 * struct.
565 */
566struct binder_txn_fd_fixup {
567 struct list_head fixup_entry;
568 struct file *file;
569 size_t offset;
570};
571
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000572struct binder_transaction {
573 int debug_id;
574 struct binder_work work;
575 struct binder_thread *from;
576 struct binder_transaction *from_parent;
577 struct binder_proc *to_proc;
578 struct binder_thread *to_thread;
579 struct binder_transaction *to_parent;
580 unsigned need_reply:1;
581 /* unsigned is_dead:1; */ /* not used at the moment */
582
583 struct binder_buffer *buffer;
584 unsigned int code;
585 unsigned int flags;
586 long priority;
587 long saved_priority;
588 kuid_t sender_euid;
David Brazdil0f672f62019-12-10 10:32:29 +0000589 struct list_head fd_fixups;
590 binder_uintptr_t security_ctx;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000591 /**
592 * @lock: protects @from, @to_proc, and @to_thread
593 *
594 * @from, @to_proc, and @to_thread can be set to NULL
595 * during thread teardown
596 */
597 spinlock_t lock;
598};
599
600/**
David Brazdil0f672f62019-12-10 10:32:29 +0000601 * struct binder_object - union of flat binder object types
602 * @hdr: generic object header
603 * @fbo: binder object (nodes and refs)
604 * @fdo: file descriptor object
605 * @bbo: binder buffer pointer
606 * @fdao: file descriptor array
607 *
608 * Used for type-independent object copies
609 */
610struct binder_object {
611 union {
612 struct binder_object_header hdr;
613 struct flat_binder_object fbo;
614 struct binder_fd_object fdo;
615 struct binder_buffer_object bbo;
616 struct binder_fd_array_object fdao;
617 };
618};
619
620/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000621 * binder_proc_lock() - Acquire outer lock for given binder_proc
622 * @proc: struct binder_proc to acquire
623 *
624 * Acquires proc->outer_lock. Used to protect binder_ref
625 * structures associated with the given proc.
626 */
627#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
628static void
629_binder_proc_lock(struct binder_proc *proc, int line)
David Brazdil0f672f62019-12-10 10:32:29 +0000630 __acquires(&proc->outer_lock)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000631{
632 binder_debug(BINDER_DEBUG_SPINLOCKS,
633 "%s: line=%d\n", __func__, line);
634 spin_lock(&proc->outer_lock);
635}
636
637/**
638 * binder_proc_unlock() - Release spinlock for given binder_proc
639 * @proc: struct binder_proc to acquire
640 *
641 * Release lock acquired via binder_proc_lock()
642 */
643#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
644static void
645_binder_proc_unlock(struct binder_proc *proc, int line)
David Brazdil0f672f62019-12-10 10:32:29 +0000646 __releases(&proc->outer_lock)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000647{
648 binder_debug(BINDER_DEBUG_SPINLOCKS,
649 "%s: line=%d\n", __func__, line);
650 spin_unlock(&proc->outer_lock);
651}
652
653/**
654 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
655 * @proc: struct binder_proc to acquire
656 *
657 * Acquires proc->inner_lock. Used to protect todo lists
658 */
659#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
660static void
661_binder_inner_proc_lock(struct binder_proc *proc, int line)
David Brazdil0f672f62019-12-10 10:32:29 +0000662 __acquires(&proc->inner_lock)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000663{
664 binder_debug(BINDER_DEBUG_SPINLOCKS,
665 "%s: line=%d\n", __func__, line);
666 spin_lock(&proc->inner_lock);
667}
668
669/**
670 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
671 * @proc: struct binder_proc to acquire
672 *
673 * Release lock acquired via binder_inner_proc_lock()
674 */
675#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
676static void
677_binder_inner_proc_unlock(struct binder_proc *proc, int line)
David Brazdil0f672f62019-12-10 10:32:29 +0000678 __releases(&proc->inner_lock)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000679{
680 binder_debug(BINDER_DEBUG_SPINLOCKS,
681 "%s: line=%d\n", __func__, line);
682 spin_unlock(&proc->inner_lock);
683}
684
685/**
686 * binder_node_lock() - Acquire spinlock for given binder_node
687 * @node: struct binder_node to acquire
688 *
689 * Acquires node->lock. Used to protect binder_node fields
690 */
691#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
692static void
693_binder_node_lock(struct binder_node *node, int line)
David Brazdil0f672f62019-12-10 10:32:29 +0000694 __acquires(&node->lock)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000695{
696 binder_debug(BINDER_DEBUG_SPINLOCKS,
697 "%s: line=%d\n", __func__, line);
698 spin_lock(&node->lock);
699}
700
701/**
702 * binder_node_unlock() - Release spinlock for given binder_proc
703 * @node: struct binder_node to acquire
704 *
705 * Release lock acquired via binder_node_lock()
706 */
707#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
708static void
709_binder_node_unlock(struct binder_node *node, int line)
David Brazdil0f672f62019-12-10 10:32:29 +0000710 __releases(&node->lock)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000711{
712 binder_debug(BINDER_DEBUG_SPINLOCKS,
713 "%s: line=%d\n", __func__, line);
714 spin_unlock(&node->lock);
715}
716
717/**
718 * binder_node_inner_lock() - Acquire node and inner locks
719 * @node: struct binder_node to acquire
720 *
721 * Acquires node->lock. If node->proc also acquires
722 * proc->inner_lock. Used to protect binder_node fields
723 */
724#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
725static void
726_binder_node_inner_lock(struct binder_node *node, int line)
David Brazdil0f672f62019-12-10 10:32:29 +0000727 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000728{
729 binder_debug(BINDER_DEBUG_SPINLOCKS,
730 "%s: line=%d\n", __func__, line);
731 spin_lock(&node->lock);
732 if (node->proc)
733 binder_inner_proc_lock(node->proc);
David Brazdil0f672f62019-12-10 10:32:29 +0000734 else
735 /* annotation for sparse */
736 __acquire(&node->proc->inner_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000737}
738
739/**
740 * binder_node_unlock() - Release node and inner locks
741 * @node: struct binder_node to acquire
742 *
743 * Release lock acquired via binder_node_lock()
744 */
745#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
746static void
747_binder_node_inner_unlock(struct binder_node *node, int line)
David Brazdil0f672f62019-12-10 10:32:29 +0000748 __releases(&node->lock) __releases(&node->proc->inner_lock)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000749{
750 struct binder_proc *proc = node->proc;
751
752 binder_debug(BINDER_DEBUG_SPINLOCKS,
753 "%s: line=%d\n", __func__, line);
754 if (proc)
755 binder_inner_proc_unlock(proc);
David Brazdil0f672f62019-12-10 10:32:29 +0000756 else
757 /* annotation for sparse */
758 __release(&node->proc->inner_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000759 spin_unlock(&node->lock);
760}
761
762static bool binder_worklist_empty_ilocked(struct list_head *list)
763{
764 return list_empty(list);
765}
766
767/**
768 * binder_worklist_empty() - Check if no items on the work list
769 * @proc: binder_proc associated with list
770 * @list: list to check
771 *
772 * Return: true if there are no items on list, else false
773 */
774static bool binder_worklist_empty(struct binder_proc *proc,
775 struct list_head *list)
776{
777 bool ret;
778
779 binder_inner_proc_lock(proc);
780 ret = binder_worklist_empty_ilocked(list);
781 binder_inner_proc_unlock(proc);
782 return ret;
783}
784
785/**
786 * binder_enqueue_work_ilocked() - Add an item to the work list
787 * @work: struct binder_work to add to list
788 * @target_list: list to add work to
789 *
790 * Adds the work to the specified list. Asserts that work
791 * is not already on a list.
792 *
793 * Requires the proc->inner_lock to be held.
794 */
795static void
796binder_enqueue_work_ilocked(struct binder_work *work,
797 struct list_head *target_list)
798{
799 BUG_ON(target_list == NULL);
800 BUG_ON(work->entry.next && !list_empty(&work->entry));
801 list_add_tail(&work->entry, target_list);
802}
803
804/**
805 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
806 * @thread: thread to queue work to
807 * @work: struct binder_work to add to list
808 *
809 * Adds the work to the todo list of the thread. Doesn't set the process_todo
810 * flag, which means that (if it wasn't already set) the thread will go to
811 * sleep without handling this work when it calls read.
812 *
813 * Requires the proc->inner_lock to be held.
814 */
815static void
816binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
817 struct binder_work *work)
818{
David Brazdil0f672f62019-12-10 10:32:29 +0000819 WARN_ON(!list_empty(&thread->waiting_thread_node));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000820 binder_enqueue_work_ilocked(work, &thread->todo);
821}
822
823/**
824 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
825 * @thread: thread to queue work to
826 * @work: struct binder_work to add to list
827 *
828 * Adds the work to the todo list of the thread, and enables processing
829 * of the todo queue.
830 *
831 * Requires the proc->inner_lock to be held.
832 */
833static void
834binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
835 struct binder_work *work)
836{
David Brazdil0f672f62019-12-10 10:32:29 +0000837 WARN_ON(!list_empty(&thread->waiting_thread_node));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000838 binder_enqueue_work_ilocked(work, &thread->todo);
839 thread->process_todo = true;
840}
841
842/**
843 * binder_enqueue_thread_work() - Add an item to the thread work list
844 * @thread: thread to queue work to
845 * @work: struct binder_work to add to list
846 *
847 * Adds the work to the todo list of the thread, and enables processing
848 * of the todo queue.
849 */
850static void
851binder_enqueue_thread_work(struct binder_thread *thread,
852 struct binder_work *work)
853{
854 binder_inner_proc_lock(thread->proc);
855 binder_enqueue_thread_work_ilocked(thread, work);
856 binder_inner_proc_unlock(thread->proc);
857}
858
859static void
860binder_dequeue_work_ilocked(struct binder_work *work)
861{
862 list_del_init(&work->entry);
863}
864
865/**
866 * binder_dequeue_work() - Removes an item from the work list
867 * @proc: binder_proc associated with list
868 * @work: struct binder_work to remove from list
869 *
870 * Removes the specified work item from whatever list it is on.
871 * Can safely be called if work is not on any list.
872 */
873static void
874binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
875{
876 binder_inner_proc_lock(proc);
877 binder_dequeue_work_ilocked(work);
878 binder_inner_proc_unlock(proc);
879}
880
881static struct binder_work *binder_dequeue_work_head_ilocked(
882 struct list_head *list)
883{
884 struct binder_work *w;
885
886 w = list_first_entry_or_null(list, struct binder_work, entry);
887 if (w)
888 list_del_init(&w->entry);
889 return w;
890}
891
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000892static void
893binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
894static void binder_free_thread(struct binder_thread *thread);
895static void binder_free_proc(struct binder_proc *proc);
896static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
897
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000898static bool binder_has_work_ilocked(struct binder_thread *thread,
899 bool do_proc_work)
900{
901 return thread->process_todo ||
902 thread->looper_need_return ||
903 (do_proc_work &&
904 !binder_worklist_empty_ilocked(&thread->proc->todo));
905}
906
907static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
908{
909 bool has_work;
910
911 binder_inner_proc_lock(thread->proc);
912 has_work = binder_has_work_ilocked(thread, do_proc_work);
913 binder_inner_proc_unlock(thread->proc);
914
915 return has_work;
916}
917
918static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
919{
920 return !thread->transaction_stack &&
921 binder_worklist_empty_ilocked(&thread->todo) &&
922 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
923 BINDER_LOOPER_STATE_REGISTERED));
924}
925
926static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
927 bool sync)
928{
929 struct rb_node *n;
930 struct binder_thread *thread;
931
932 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
933 thread = rb_entry(n, struct binder_thread, rb_node);
934 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
935 binder_available_for_proc_work_ilocked(thread)) {
936 if (sync)
937 wake_up_interruptible_sync(&thread->wait);
938 else
939 wake_up_interruptible(&thread->wait);
940 }
941 }
942}
943
944/**
945 * binder_select_thread_ilocked() - selects a thread for doing proc work.
946 * @proc: process to select a thread from
947 *
948 * Note that calling this function moves the thread off the waiting_threads
949 * list, so it can only be woken up by the caller of this function, or a
950 * signal. Therefore, callers *should* always wake up the thread this function
951 * returns.
952 *
953 * Return: If there's a thread currently waiting for process work,
954 * returns that thread. Otherwise returns NULL.
955 */
956static struct binder_thread *
957binder_select_thread_ilocked(struct binder_proc *proc)
958{
959 struct binder_thread *thread;
960
961 assert_spin_locked(&proc->inner_lock);
962 thread = list_first_entry_or_null(&proc->waiting_threads,
963 struct binder_thread,
964 waiting_thread_node);
965
966 if (thread)
967 list_del_init(&thread->waiting_thread_node);
968
969 return thread;
970}
971
972/**
973 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
974 * @proc: process to wake up a thread in
975 * @thread: specific thread to wake-up (may be NULL)
976 * @sync: whether to do a synchronous wake-up
977 *
978 * This function wakes up a thread in the @proc process.
979 * The caller may provide a specific thread to wake-up in
980 * the @thread parameter. If @thread is NULL, this function
981 * will wake up threads that have called poll().
982 *
983 * Note that for this function to work as expected, callers
984 * should first call binder_select_thread() to find a thread
985 * to handle the work (if they don't have a thread already),
986 * and pass the result into the @thread parameter.
987 */
988static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
989 struct binder_thread *thread,
990 bool sync)
991{
992 assert_spin_locked(&proc->inner_lock);
993
994 if (thread) {
995 if (sync)
996 wake_up_interruptible_sync(&thread->wait);
997 else
998 wake_up_interruptible(&thread->wait);
999 return;
1000 }
1001
1002 /* Didn't find a thread waiting for proc work; this can happen
1003 * in two scenarios:
1004 * 1. All threads are busy handling transactions
1005 * In that case, one of those threads should call back into
1006 * the kernel driver soon and pick up this work.
1007 * 2. Threads are using the (e)poll interface, in which case
1008 * they may be blocked on the waitqueue without having been
1009 * added to waiting_threads. For this case, we just iterate
1010 * over all threads not handling transaction work, and
1011 * wake them all up. We wake all because we don't know whether
1012 * a thread that called into (e)poll is handling non-binder
1013 * work currently.
1014 */
1015 binder_wakeup_poll_threads_ilocked(proc, sync);
1016}
1017
1018static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1019{
1020 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1021
1022 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1023}
1024
1025static void binder_set_nice(long nice)
1026{
1027 long min_nice;
1028
1029 if (can_nice(current, nice)) {
1030 set_user_nice(current, nice);
1031 return;
1032 }
1033 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1034 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1035 "%d: nice value %ld not allowed use %ld instead\n",
1036 current->pid, nice, min_nice);
1037 set_user_nice(current, min_nice);
1038 if (min_nice <= MAX_NICE)
1039 return;
1040 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1041}
1042
1043static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1044 binder_uintptr_t ptr)
1045{
1046 struct rb_node *n = proc->nodes.rb_node;
1047 struct binder_node *node;
1048
1049 assert_spin_locked(&proc->inner_lock);
1050
1051 while (n) {
1052 node = rb_entry(n, struct binder_node, rb_node);
1053
1054 if (ptr < node->ptr)
1055 n = n->rb_left;
1056 else if (ptr > node->ptr)
1057 n = n->rb_right;
1058 else {
1059 /*
1060 * take an implicit weak reference
1061 * to ensure node stays alive until
1062 * call to binder_put_node()
1063 */
1064 binder_inc_node_tmpref_ilocked(node);
1065 return node;
1066 }
1067 }
1068 return NULL;
1069}
1070
1071static struct binder_node *binder_get_node(struct binder_proc *proc,
1072 binder_uintptr_t ptr)
1073{
1074 struct binder_node *node;
1075
1076 binder_inner_proc_lock(proc);
1077 node = binder_get_node_ilocked(proc, ptr);
1078 binder_inner_proc_unlock(proc);
1079 return node;
1080}
1081
1082static struct binder_node *binder_init_node_ilocked(
1083 struct binder_proc *proc,
1084 struct binder_node *new_node,
1085 struct flat_binder_object *fp)
1086{
1087 struct rb_node **p = &proc->nodes.rb_node;
1088 struct rb_node *parent = NULL;
1089 struct binder_node *node;
1090 binder_uintptr_t ptr = fp ? fp->binder : 0;
1091 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1092 __u32 flags = fp ? fp->flags : 0;
1093
1094 assert_spin_locked(&proc->inner_lock);
1095
1096 while (*p) {
1097
1098 parent = *p;
1099 node = rb_entry(parent, struct binder_node, rb_node);
1100
1101 if (ptr < node->ptr)
1102 p = &(*p)->rb_left;
1103 else if (ptr > node->ptr)
1104 p = &(*p)->rb_right;
1105 else {
1106 /*
1107 * A matching node is already in
1108 * the rb tree. Abandon the init
1109 * and return it.
1110 */
1111 binder_inc_node_tmpref_ilocked(node);
1112 return node;
1113 }
1114 }
1115 node = new_node;
1116 binder_stats_created(BINDER_STAT_NODE);
1117 node->tmp_refs++;
1118 rb_link_node(&node->rb_node, parent, p);
1119 rb_insert_color(&node->rb_node, &proc->nodes);
1120 node->debug_id = atomic_inc_return(&binder_last_id);
1121 node->proc = proc;
1122 node->ptr = ptr;
1123 node->cookie = cookie;
1124 node->work.type = BINDER_WORK_NODE;
1125 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1126 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
David Brazdil0f672f62019-12-10 10:32:29 +00001127 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001128 spin_lock_init(&node->lock);
1129 INIT_LIST_HEAD(&node->work.entry);
1130 INIT_LIST_HEAD(&node->async_todo);
1131 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1132 "%d:%d node %d u%016llx c%016llx created\n",
1133 proc->pid, current->pid, node->debug_id,
1134 (u64)node->ptr, (u64)node->cookie);
1135
1136 return node;
1137}
1138
1139static struct binder_node *binder_new_node(struct binder_proc *proc,
1140 struct flat_binder_object *fp)
1141{
1142 struct binder_node *node;
1143 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1144
1145 if (!new_node)
1146 return NULL;
1147 binder_inner_proc_lock(proc);
1148 node = binder_init_node_ilocked(proc, new_node, fp);
1149 binder_inner_proc_unlock(proc);
1150 if (node != new_node)
1151 /*
1152 * The node was already added by another thread
1153 */
1154 kfree(new_node);
1155
1156 return node;
1157}
1158
1159static void binder_free_node(struct binder_node *node)
1160{
1161 kfree(node);
1162 binder_stats_deleted(BINDER_STAT_NODE);
1163}
1164
1165static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1166 int internal,
1167 struct list_head *target_list)
1168{
1169 struct binder_proc *proc = node->proc;
1170
1171 assert_spin_locked(&node->lock);
1172 if (proc)
1173 assert_spin_locked(&proc->inner_lock);
1174 if (strong) {
1175 if (internal) {
1176 if (target_list == NULL &&
1177 node->internal_strong_refs == 0 &&
1178 !(node->proc &&
1179 node == node->proc->context->binder_context_mgr_node &&
1180 node->has_strong_ref)) {
1181 pr_err("invalid inc strong node for %d\n",
1182 node->debug_id);
1183 return -EINVAL;
1184 }
1185 node->internal_strong_refs++;
1186 } else
1187 node->local_strong_refs++;
1188 if (!node->has_strong_ref && target_list) {
David Brazdil0f672f62019-12-10 10:32:29 +00001189 struct binder_thread *thread = container_of(target_list,
1190 struct binder_thread, todo);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001191 binder_dequeue_work_ilocked(&node->work);
David Brazdil0f672f62019-12-10 10:32:29 +00001192 BUG_ON(&thread->todo != target_list);
1193 binder_enqueue_deferred_thread_work_ilocked(thread,
1194 &node->work);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001195 }
1196 } else {
1197 if (!internal)
1198 node->local_weak_refs++;
1199 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1200 if (target_list == NULL) {
1201 pr_err("invalid inc weak node for %d\n",
1202 node->debug_id);
1203 return -EINVAL;
1204 }
1205 /*
1206 * See comment above
1207 */
1208 binder_enqueue_work_ilocked(&node->work, target_list);
1209 }
1210 }
1211 return 0;
1212}
1213
1214static int binder_inc_node(struct binder_node *node, int strong, int internal,
1215 struct list_head *target_list)
1216{
1217 int ret;
1218
1219 binder_node_inner_lock(node);
1220 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1221 binder_node_inner_unlock(node);
1222
1223 return ret;
1224}
1225
1226static bool binder_dec_node_nilocked(struct binder_node *node,
1227 int strong, int internal)
1228{
1229 struct binder_proc *proc = node->proc;
1230
1231 assert_spin_locked(&node->lock);
1232 if (proc)
1233 assert_spin_locked(&proc->inner_lock);
1234 if (strong) {
1235 if (internal)
1236 node->internal_strong_refs--;
1237 else
1238 node->local_strong_refs--;
1239 if (node->local_strong_refs || node->internal_strong_refs)
1240 return false;
1241 } else {
1242 if (!internal)
1243 node->local_weak_refs--;
1244 if (node->local_weak_refs || node->tmp_refs ||
1245 !hlist_empty(&node->refs))
1246 return false;
1247 }
1248
1249 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1250 if (list_empty(&node->work.entry)) {
1251 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1252 binder_wakeup_proc_ilocked(proc);
1253 }
1254 } else {
1255 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1256 !node->local_weak_refs && !node->tmp_refs) {
1257 if (proc) {
1258 binder_dequeue_work_ilocked(&node->work);
1259 rb_erase(&node->rb_node, &proc->nodes);
1260 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1261 "refless node %d deleted\n",
1262 node->debug_id);
1263 } else {
1264 BUG_ON(!list_empty(&node->work.entry));
1265 spin_lock(&binder_dead_nodes_lock);
1266 /*
1267 * tmp_refs could have changed so
1268 * check it again
1269 */
1270 if (node->tmp_refs) {
1271 spin_unlock(&binder_dead_nodes_lock);
1272 return false;
1273 }
1274 hlist_del(&node->dead_node);
1275 spin_unlock(&binder_dead_nodes_lock);
1276 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1277 "dead node %d deleted\n",
1278 node->debug_id);
1279 }
1280 return true;
1281 }
1282 }
1283 return false;
1284}
1285
1286static void binder_dec_node(struct binder_node *node, int strong, int internal)
1287{
1288 bool free_node;
1289
1290 binder_node_inner_lock(node);
1291 free_node = binder_dec_node_nilocked(node, strong, internal);
1292 binder_node_inner_unlock(node);
1293 if (free_node)
1294 binder_free_node(node);
1295}
1296
1297static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1298{
1299 /*
1300 * No call to binder_inc_node() is needed since we
1301 * don't need to inform userspace of any changes to
1302 * tmp_refs
1303 */
1304 node->tmp_refs++;
1305}
1306
1307/**
1308 * binder_inc_node_tmpref() - take a temporary reference on node
1309 * @node: node to reference
1310 *
1311 * Take reference on node to prevent the node from being freed
1312 * while referenced only by a local variable. The inner lock is
1313 * needed to serialize with the node work on the queue (which
1314 * isn't needed after the node is dead). If the node is dead
1315 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1316 * node->tmp_refs against dead-node-only cases where the node
1317 * lock cannot be acquired (eg traversing the dead node list to
1318 * print nodes)
1319 */
1320static void binder_inc_node_tmpref(struct binder_node *node)
1321{
1322 binder_node_lock(node);
1323 if (node->proc)
1324 binder_inner_proc_lock(node->proc);
1325 else
1326 spin_lock(&binder_dead_nodes_lock);
1327 binder_inc_node_tmpref_ilocked(node);
1328 if (node->proc)
1329 binder_inner_proc_unlock(node->proc);
1330 else
1331 spin_unlock(&binder_dead_nodes_lock);
1332 binder_node_unlock(node);
1333}
1334
1335/**
1336 * binder_dec_node_tmpref() - remove a temporary reference on node
1337 * @node: node to reference
1338 *
1339 * Release temporary reference on node taken via binder_inc_node_tmpref()
1340 */
1341static void binder_dec_node_tmpref(struct binder_node *node)
1342{
1343 bool free_node;
1344
1345 binder_node_inner_lock(node);
1346 if (!node->proc)
1347 spin_lock(&binder_dead_nodes_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00001348 else
1349 __acquire(&binder_dead_nodes_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001350 node->tmp_refs--;
1351 BUG_ON(node->tmp_refs < 0);
1352 if (!node->proc)
1353 spin_unlock(&binder_dead_nodes_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00001354 else
1355 __release(&binder_dead_nodes_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001356 /*
1357 * Call binder_dec_node() to check if all refcounts are 0
1358 * and cleanup is needed. Calling with strong=0 and internal=1
1359 * causes no actual reference to be released in binder_dec_node().
1360 * If that changes, a change is needed here too.
1361 */
1362 free_node = binder_dec_node_nilocked(node, 0, 1);
1363 binder_node_inner_unlock(node);
1364 if (free_node)
1365 binder_free_node(node);
1366}
1367
1368static void binder_put_node(struct binder_node *node)
1369{
1370 binder_dec_node_tmpref(node);
1371}
1372
1373static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1374 u32 desc, bool need_strong_ref)
1375{
1376 struct rb_node *n = proc->refs_by_desc.rb_node;
1377 struct binder_ref *ref;
1378
1379 while (n) {
1380 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1381
1382 if (desc < ref->data.desc) {
1383 n = n->rb_left;
1384 } else if (desc > ref->data.desc) {
1385 n = n->rb_right;
1386 } else if (need_strong_ref && !ref->data.strong) {
1387 binder_user_error("tried to use weak ref as strong ref\n");
1388 return NULL;
1389 } else {
1390 return ref;
1391 }
1392 }
1393 return NULL;
1394}
1395
1396/**
1397 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1398 * @proc: binder_proc that owns the ref
1399 * @node: binder_node of target
1400 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1401 *
1402 * Look up the ref for the given node and return it if it exists
1403 *
1404 * If it doesn't exist and the caller provides a newly allocated
1405 * ref, initialize the fields of the newly allocated ref and insert
1406 * into the given proc rb_trees and node refs list.
1407 *
1408 * Return: the ref for node. It is possible that another thread
1409 * allocated/initialized the ref first in which case the
1410 * returned ref would be different than the passed-in
1411 * new_ref. new_ref must be kfree'd by the caller in
1412 * this case.
1413 */
1414static struct binder_ref *binder_get_ref_for_node_olocked(
1415 struct binder_proc *proc,
1416 struct binder_node *node,
1417 struct binder_ref *new_ref)
1418{
1419 struct binder_context *context = proc->context;
1420 struct rb_node **p = &proc->refs_by_node.rb_node;
1421 struct rb_node *parent = NULL;
1422 struct binder_ref *ref;
1423 struct rb_node *n;
1424
1425 while (*p) {
1426 parent = *p;
1427 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1428
1429 if (node < ref->node)
1430 p = &(*p)->rb_left;
1431 else if (node > ref->node)
1432 p = &(*p)->rb_right;
1433 else
1434 return ref;
1435 }
1436 if (!new_ref)
1437 return NULL;
1438
1439 binder_stats_created(BINDER_STAT_REF);
1440 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1441 new_ref->proc = proc;
1442 new_ref->node = node;
1443 rb_link_node(&new_ref->rb_node_node, parent, p);
1444 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1445
1446 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1447 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1448 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1449 if (ref->data.desc > new_ref->data.desc)
1450 break;
1451 new_ref->data.desc = ref->data.desc + 1;
1452 }
1453
1454 p = &proc->refs_by_desc.rb_node;
1455 while (*p) {
1456 parent = *p;
1457 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1458
1459 if (new_ref->data.desc < ref->data.desc)
1460 p = &(*p)->rb_left;
1461 else if (new_ref->data.desc > ref->data.desc)
1462 p = &(*p)->rb_right;
1463 else
1464 BUG();
1465 }
1466 rb_link_node(&new_ref->rb_node_desc, parent, p);
1467 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1468
1469 binder_node_lock(node);
1470 hlist_add_head(&new_ref->node_entry, &node->refs);
1471
1472 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1473 "%d new ref %d desc %d for node %d\n",
1474 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1475 node->debug_id);
1476 binder_node_unlock(node);
1477 return new_ref;
1478}
1479
1480static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1481{
1482 bool delete_node = false;
1483
1484 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1485 "%d delete ref %d desc %d for node %d\n",
1486 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1487 ref->node->debug_id);
1488
1489 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1490 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1491
1492 binder_node_inner_lock(ref->node);
1493 if (ref->data.strong)
1494 binder_dec_node_nilocked(ref->node, 1, 1);
1495
1496 hlist_del(&ref->node_entry);
1497 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1498 binder_node_inner_unlock(ref->node);
1499 /*
1500 * Clear ref->node unless we want the caller to free the node
1501 */
1502 if (!delete_node) {
1503 /*
1504 * The caller uses ref->node to determine
1505 * whether the node needs to be freed. Clear
1506 * it since the node is still alive.
1507 */
1508 ref->node = NULL;
1509 }
1510
1511 if (ref->death) {
1512 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1513 "%d delete ref %d desc %d has death notification\n",
1514 ref->proc->pid, ref->data.debug_id,
1515 ref->data.desc);
1516 binder_dequeue_work(ref->proc, &ref->death->work);
1517 binder_stats_deleted(BINDER_STAT_DEATH);
1518 }
1519 binder_stats_deleted(BINDER_STAT_REF);
1520}
1521
1522/**
1523 * binder_inc_ref_olocked() - increment the ref for given handle
1524 * @ref: ref to be incremented
1525 * @strong: if true, strong increment, else weak
1526 * @target_list: list to queue node work on
1527 *
1528 * Increment the ref. @ref->proc->outer_lock must be held on entry
1529 *
1530 * Return: 0, if successful, else errno
1531 */
1532static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1533 struct list_head *target_list)
1534{
1535 int ret;
1536
1537 if (strong) {
1538 if (ref->data.strong == 0) {
1539 ret = binder_inc_node(ref->node, 1, 1, target_list);
1540 if (ret)
1541 return ret;
1542 }
1543 ref->data.strong++;
1544 } else {
1545 if (ref->data.weak == 0) {
1546 ret = binder_inc_node(ref->node, 0, 1, target_list);
1547 if (ret)
1548 return ret;
1549 }
1550 ref->data.weak++;
1551 }
1552 return 0;
1553}
1554
1555/**
1556 * binder_dec_ref() - dec the ref for given handle
1557 * @ref: ref to be decremented
1558 * @strong: if true, strong decrement, else weak
1559 *
1560 * Decrement the ref.
1561 *
1562 * Return: true if ref is cleaned up and ready to be freed
1563 */
1564static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1565{
1566 if (strong) {
1567 if (ref->data.strong == 0) {
1568 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1569 ref->proc->pid, ref->data.debug_id,
1570 ref->data.desc, ref->data.strong,
1571 ref->data.weak);
1572 return false;
1573 }
1574 ref->data.strong--;
1575 if (ref->data.strong == 0)
1576 binder_dec_node(ref->node, strong, 1);
1577 } else {
1578 if (ref->data.weak == 0) {
1579 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1580 ref->proc->pid, ref->data.debug_id,
1581 ref->data.desc, ref->data.strong,
1582 ref->data.weak);
1583 return false;
1584 }
1585 ref->data.weak--;
1586 }
1587 if (ref->data.strong == 0 && ref->data.weak == 0) {
1588 binder_cleanup_ref_olocked(ref);
1589 return true;
1590 }
1591 return false;
1592}
1593
1594/**
1595 * binder_get_node_from_ref() - get the node from the given proc/desc
1596 * @proc: proc containing the ref
1597 * @desc: the handle associated with the ref
1598 * @need_strong_ref: if true, only return node if ref is strong
1599 * @rdata: the id/refcount data for the ref
1600 *
1601 * Given a proc and ref handle, return the associated binder_node
1602 *
1603 * Return: a binder_node or NULL if not found or not strong when strong required
1604 */
1605static struct binder_node *binder_get_node_from_ref(
1606 struct binder_proc *proc,
1607 u32 desc, bool need_strong_ref,
1608 struct binder_ref_data *rdata)
1609{
1610 struct binder_node *node;
1611 struct binder_ref *ref;
1612
1613 binder_proc_lock(proc);
1614 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1615 if (!ref)
1616 goto err_no_ref;
1617 node = ref->node;
1618 /*
1619 * Take an implicit reference on the node to ensure
1620 * it stays alive until the call to binder_put_node()
1621 */
1622 binder_inc_node_tmpref(node);
1623 if (rdata)
1624 *rdata = ref->data;
1625 binder_proc_unlock(proc);
1626
1627 return node;
1628
1629err_no_ref:
1630 binder_proc_unlock(proc);
1631 return NULL;
1632}
1633
1634/**
1635 * binder_free_ref() - free the binder_ref
1636 * @ref: ref to free
1637 *
1638 * Free the binder_ref. Free the binder_node indicated by ref->node
1639 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1640 */
1641static void binder_free_ref(struct binder_ref *ref)
1642{
1643 if (ref->node)
1644 binder_free_node(ref->node);
1645 kfree(ref->death);
1646 kfree(ref);
1647}
1648
1649/**
1650 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1651 * @proc: proc containing the ref
1652 * @desc: the handle associated with the ref
1653 * @increment: true=inc reference, false=dec reference
1654 * @strong: true=strong reference, false=weak reference
1655 * @rdata: the id/refcount data for the ref
1656 *
1657 * Given a proc and ref handle, increment or decrement the ref
1658 * according to "increment" arg.
1659 *
1660 * Return: 0 if successful, else errno
1661 */
1662static int binder_update_ref_for_handle(struct binder_proc *proc,
1663 uint32_t desc, bool increment, bool strong,
1664 struct binder_ref_data *rdata)
1665{
1666 int ret = 0;
1667 struct binder_ref *ref;
1668 bool delete_ref = false;
1669
1670 binder_proc_lock(proc);
1671 ref = binder_get_ref_olocked(proc, desc, strong);
1672 if (!ref) {
1673 ret = -EINVAL;
1674 goto err_no_ref;
1675 }
1676 if (increment)
1677 ret = binder_inc_ref_olocked(ref, strong, NULL);
1678 else
1679 delete_ref = binder_dec_ref_olocked(ref, strong);
1680
1681 if (rdata)
1682 *rdata = ref->data;
1683 binder_proc_unlock(proc);
1684
1685 if (delete_ref)
1686 binder_free_ref(ref);
1687 return ret;
1688
1689err_no_ref:
1690 binder_proc_unlock(proc);
1691 return ret;
1692}
1693
1694/**
1695 * binder_dec_ref_for_handle() - dec the ref for given handle
1696 * @proc: proc containing the ref
1697 * @desc: the handle associated with the ref
1698 * @strong: true=strong reference, false=weak reference
1699 * @rdata: the id/refcount data for the ref
1700 *
1701 * Just calls binder_update_ref_for_handle() to decrement the ref.
1702 *
1703 * Return: 0 if successful, else errno
1704 */
1705static int binder_dec_ref_for_handle(struct binder_proc *proc,
1706 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1707{
1708 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1709}
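/*
 * Minimal usage sketch for the two helpers above (the handle value is
 * hypothetical): take a strong reference on handle 3 of @proc and drop
 * it again.
 *
 *	struct binder_ref_data rdata;
 *
 *	if (!binder_update_ref_for_handle(proc, 3, true, true, &rdata))
 *		binder_dec_ref_for_handle(proc, 3, true, &rdata);
 */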
1710
1711
1712/**
1713 * binder_inc_ref_for_node() - increment the ref for given proc/node
1714 * @proc: proc containing the ref
1715 * @node: target node
1716 * @strong: true=strong reference, false=weak reference
1717 * @target_list: worklist to use if node is incremented
1718 * @rdata: the id/refcount data for the ref
1719 *
1720 * Given a proc and node, increment the ref. Create the ref if it
1721 * doesn't already exist
1722 *
1723 * Return: 0 if successful, else errno
1724 */
1725static int binder_inc_ref_for_node(struct binder_proc *proc,
1726 struct binder_node *node,
1727 bool strong,
1728 struct list_head *target_list,
1729 struct binder_ref_data *rdata)
1730{
1731 struct binder_ref *ref;
1732 struct binder_ref *new_ref = NULL;
1733 int ret = 0;
1734
1735 binder_proc_lock(proc);
1736 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
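	/*
	 * No ref exists for this node yet: drop the proc lock to
	 * allocate one, then retake the lock and look up again in case
	 * another thread created the ref while the lock was dropped.
	 */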
1737 if (!ref) {
1738 binder_proc_unlock(proc);
1739 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1740 if (!new_ref)
1741 return -ENOMEM;
1742 binder_proc_lock(proc);
1743 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1744 }
1745 ret = binder_inc_ref_olocked(ref, strong, target_list);
1746 *rdata = ref->data;
Olivier Deprez92d4c212022-12-06 15:05:30 +01001747 if (ret && ref == new_ref) {
1748 /*
1749 * Cleanup the failed reference here as the target
1750 * could now be dead and have already released its
1751 * references by now. Calling binder_cleanup_ref_olocked() on the
1752 * new reference with strong=0 and a tmp_refs will not decrement
1753 * the node. The new_ref gets kfree'd below.
1754 */
1755 binder_cleanup_ref_olocked(new_ref);
1756 ref = NULL;
1757 }
1758
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001759 binder_proc_unlock(proc);
1760 if (new_ref && ref != new_ref)
1761 /*
1762 * Another thread created the ref first so
1763 * free the one we allocated
1764 */
1765 kfree(new_ref);
1766 return ret;
1767}
1768
1769static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1770 struct binder_transaction *t)
1771{
1772 BUG_ON(!target_thread);
1773 assert_spin_locked(&target_thread->proc->inner_lock);
1774 BUG_ON(target_thread->transaction_stack != t);
1775 BUG_ON(target_thread->transaction_stack->from != target_thread);
1776 target_thread->transaction_stack =
1777 target_thread->transaction_stack->from_parent;
1778 t->from = NULL;
1779}
1780
1781/**
1782 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1783 * @thread: thread to decrement
1784 *
1785 * A thread needs to be kept alive while being used to create or
1786 * handle a transaction. binder_get_txn_from() is used to safely
1787 * extract t->from from a binder_transaction and keep the thread
1788 * indicated by t->from from being freed. When done with that
1789 * binder_thread, this function is called to decrement the
1790 * tmp_ref and free if appropriate (thread has been released
1791 * and no transaction being processed by the driver)
1792 */
1793static void binder_thread_dec_tmpref(struct binder_thread *thread)
1794{
1795 /*
1796 * atomic is used to protect the counter value while
1797 * it cannot reach zero or thread->is_dead is false
1798 */
1799 binder_inner_proc_lock(thread->proc);
1800 atomic_dec(&thread->tmp_ref);
1801 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1802 binder_inner_proc_unlock(thread->proc);
1803 binder_free_thread(thread);
1804 return;
1805 }
1806 binder_inner_proc_unlock(thread->proc);
1807}
1808
1809/**
1810 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1811 * @proc: proc to decrement
1812 *
1813 * A binder_proc needs to be kept alive while being used to create or
1814 * handle a transaction. proc->tmp_ref is incremented when
1815 * creating a new transaction or the binder_proc is currently in-use
1816 * by threads that are being released. When done with the binder_proc,
1817 * this function is called to decrement the counter and free the
1818 * proc if appropriate (proc has been released, all threads have
1819 * been released and not currently in use to process a transaction).
1820 */
1821static void binder_proc_dec_tmpref(struct binder_proc *proc)
1822{
1823 binder_inner_proc_lock(proc);
1824 proc->tmp_ref--;
1825 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1826 !proc->tmp_ref) {
1827 binder_inner_proc_unlock(proc);
1828 binder_free_proc(proc);
1829 return;
1830 }
1831 binder_inner_proc_unlock(proc);
1832}
1833
1834/**
1835 * binder_get_txn_from() - safely extract the "from" thread in transaction
1836 * @t: binder transaction for t->from
1837 *
1838 * Atomically return the "from" thread and increment the tmp_ref
1839 * count for the thread to ensure it stays alive until
1840 * binder_thread_dec_tmpref() is called.
1841 *
1842 * Return: the value of t->from
1843 */
1844static struct binder_thread *binder_get_txn_from(
1845 struct binder_transaction *t)
1846{
1847 struct binder_thread *from;
1848
1849 spin_lock(&t->lock);
1850 from = t->from;
1851 if (from)
1852 atomic_inc(&from->tmp_ref);
1853 spin_unlock(&t->lock);
1854 return from;
1855}
1856
1857/**
1858 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1859 * @t: binder transaction for t->from
1860 *
1861 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1862 * to guarantee that the thread cannot be released while operating on it.
1863 * The caller must call binder_inner_proc_unlock() to release the inner lock
1864 * as well as call binder_dec_thread_txn() to release the reference.
1865 *
1866 * Return: the value of t->from
1867 */
1868static struct binder_thread *binder_get_txn_from_and_acq_inner(
1869 struct binder_transaction *t)
David Brazdil0f672f62019-12-10 10:32:29 +00001870 __acquires(&t->from->proc->inner_lock)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001871{
1872 struct binder_thread *from;
1873
1874 from = binder_get_txn_from(t);
David Brazdil0f672f62019-12-10 10:32:29 +00001875 if (!from) {
1876 __acquire(&from->proc->inner_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001877 return NULL;
David Brazdil0f672f62019-12-10 10:32:29 +00001878 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001879 binder_inner_proc_lock(from->proc);
1880 if (t->from) {
1881 BUG_ON(from != t->from);
1882 return from;
1883 }
1884 binder_inner_proc_unlock(from->proc);
David Brazdil0f672f62019-12-10 10:32:29 +00001885 __acquire(&from->proc->inner_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001886 binder_thread_dec_tmpref(from);
1887 return NULL;
1888}
1889
David Brazdil0f672f62019-12-10 10:32:29 +00001890/**
1891 * binder_free_txn_fixups() - free unprocessed fd fixups
1892 * @t: binder transaction whose fd fixups should be freed
1893 *
1894 * If the transaction is being torn down prior to being
1895 * processed by the target process, free all of the
1896 * fd fixups and fput the file structs. It is safe to
1897 * call this function after the fixups have been
1898 * processed -- in that case, the list will be empty.
1899 */
1900static void binder_free_txn_fixups(struct binder_transaction *t)
1901{
1902 struct binder_txn_fd_fixup *fixup, *tmp;
1903
1904 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1905 fput(fixup->file);
1906 list_del(&fixup->fixup_entry);
1907 kfree(fixup);
1908 }
1909}
1910
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001911static void binder_free_transaction(struct binder_transaction *t)
1912{
David Brazdil0f672f62019-12-10 10:32:29 +00001913 struct binder_proc *target_proc = t->to_proc;
1914
1915 if (target_proc) {
1916 binder_inner_proc_lock(target_proc);
1917 if (t->buffer)
1918 t->buffer->transaction = NULL;
1919 binder_inner_proc_unlock(target_proc);
1920 }
1921 /*
1922 * If the transaction has no target_proc, then
1923 * t->buffer->transaction has already been cleared.
1924 */
1925 binder_free_txn_fixups(t);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001926 kfree(t);
1927 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1928}
1929
1930static void binder_send_failed_reply(struct binder_transaction *t,
1931 uint32_t error_code)
1932{
1933 struct binder_thread *target_thread;
1934 struct binder_transaction *next;
1935
1936 BUG_ON(t->flags & TF_ONE_WAY);
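	/*
	 * Walk up the transaction stack via from_parent until a live
	 * sender thread is found to receive the error, freeing each
	 * transaction along the way.
	 */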
1937 while (1) {
1938 target_thread = binder_get_txn_from_and_acq_inner(t);
1939 if (target_thread) {
1940 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1941 "send failed reply for transaction %d to %d:%d\n",
1942 t->debug_id,
1943 target_thread->proc->pid,
1944 target_thread->pid);
1945
1946 binder_pop_transaction_ilocked(target_thread, t);
1947 if (target_thread->reply_error.cmd == BR_OK) {
1948 target_thread->reply_error.cmd = error_code;
1949 binder_enqueue_thread_work_ilocked(
1950 target_thread,
1951 &target_thread->reply_error.work);
1952 wake_up_interruptible(&target_thread->wait);
1953 } else {
1954 /*
1955 * Cannot get here for normal operation, but
1956 * we can if multiple synchronous transactions
1957 * are sent without blocking for responses.
1958 * Just ignore the 2nd error in this case.
1959 */
1960 pr_warn("Unexpected reply error: %u\n",
1961 target_thread->reply_error.cmd);
1962 }
1963 binder_inner_proc_unlock(target_thread->proc);
1964 binder_thread_dec_tmpref(target_thread);
1965 binder_free_transaction(t);
1966 return;
1967 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001968 __release(&target_thread->proc->inner_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001969 next = t->from_parent;
1970
1971 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1972 "send failed reply for transaction %d, target dead\n",
1973 t->debug_id);
1974
1975 binder_free_transaction(t);
1976 if (next == NULL) {
1977 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1978 "reply failed, no target thread at root\n");
1979 return;
1980 }
1981 t = next;
1982 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1983 "reply failed, no target thread -- retry %d\n",
1984 t->debug_id);
1985 }
1986}
1987
1988/**
1989 * binder_cleanup_transaction() - cleans up undelivered transaction
1990 * @t: transaction that needs to be cleaned up
1991 * @reason: reason the transaction wasn't delivered
1992 * @error_code: error to return to caller (if synchronous call)
1993 */
1994static void binder_cleanup_transaction(struct binder_transaction *t,
1995 const char *reason,
1996 uint32_t error_code)
1997{
1998 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1999 binder_send_failed_reply(t, error_code);
2000 } else {
2001 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2002 "undelivered transaction %d, %s\n",
2003 t->debug_id, reason);
2004 binder_free_transaction(t);
2005 }
2006}
2007
2008/**
David Brazdil0f672f62019-12-10 10:32:29 +00002009 * binder_get_object() - gets object and checks for valid metadata
2010 * @proc: binder_proc owning the buffer
Olivier Deprez92d4c212022-12-06 15:05:30 +01002011 * @u: sender's user pointer to base of buffer
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002012 * @buffer: binder_buffer that we're parsing.
David Brazdil0f672f62019-12-10 10:32:29 +00002013 * @offset: offset in the @buffer at which to validate an object.
2014 * @object: struct binder_object to read into
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002015 *
Olivier Deprez92d4c212022-12-06 15:05:30 +01002016 * Copy the binder object at the given offset into @object. If @u is
2017 * provided then the copy is from the sender's buffer. If not, then
2018 * it is copied from the target's @buffer.
2019 *
2020 * Return: If there's a valid metadata object at @offset, the
David Brazdil0f672f62019-12-10 10:32:29 +00002021 * size of that object. Otherwise, it returns zero. The object
2022 * is read into the struct binder_object pointed to by @object.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002023 */
David Brazdil0f672f62019-12-10 10:32:29 +00002024static size_t binder_get_object(struct binder_proc *proc,
Olivier Deprez92d4c212022-12-06 15:05:30 +01002025 const void __user *u,
David Brazdil0f672f62019-12-10 10:32:29 +00002026 struct binder_buffer *buffer,
2027 unsigned long offset,
2028 struct binder_object *object)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002029{
David Brazdil0f672f62019-12-10 10:32:29 +00002030 size_t read_size;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002031 struct binder_object_header *hdr;
2032 size_t object_size = 0;
2033
David Brazdil0f672f62019-12-10 10:32:29 +00002034 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
Olivier Deprez92d4c212022-12-06 15:05:30 +01002035 if (offset > buffer->data_size || read_size < sizeof(*hdr))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002036 return 0;
Olivier Deprez92d4c212022-12-06 15:05:30 +01002037 if (u) {
2038 if (copy_from_user(object, u + offset, read_size))
2039 return 0;
2040 } else {
2041 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2042 offset, read_size))
2043 return 0;
2044 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002045
David Brazdil0f672f62019-12-10 10:32:29 +00002046 /* Ok, now see if we read a complete object. */
2047 hdr = &object->hdr;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002048 switch (hdr->type) {
2049 case BINDER_TYPE_BINDER:
2050 case BINDER_TYPE_WEAK_BINDER:
2051 case BINDER_TYPE_HANDLE:
2052 case BINDER_TYPE_WEAK_HANDLE:
2053 object_size = sizeof(struct flat_binder_object);
2054 break;
2055 case BINDER_TYPE_FD:
2056 object_size = sizeof(struct binder_fd_object);
2057 break;
2058 case BINDER_TYPE_PTR:
2059 object_size = sizeof(struct binder_buffer_object);
2060 break;
2061 case BINDER_TYPE_FDA:
2062 object_size = sizeof(struct binder_fd_array_object);
2063 break;
2064 default:
2065 return 0;
2066 }
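	/* reject objects that would extend past the end of the buffer */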
2067 if (offset <= buffer->data_size - object_size &&
2068 buffer->data_size >= object_size)
2069 return object_size;
2070 else
2071 return 0;
2072}
2073
2074/**
2075 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
David Brazdil0f672f62019-12-10 10:32:29 +00002076 * @proc: binder_proc owning the buffer
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002077 * @b: binder_buffer containing the object
David Brazdil0f672f62019-12-10 10:32:29 +00002078 * @object: struct binder_object to read into
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002079 * @index: index in offset array at which the binder_buffer_object is
2080 * located
David Brazdil0f672f62019-12-10 10:32:29 +00002081 * @start_offset: points to the start of the offset array
2082 * @object_offsetp: offset of @object read from @b
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002083 * @num_valid: the number of valid offsets in the offset array
2084 *
2085 * Return: If @index is within the valid range of the offset array
2086 * described by @start_offset and @num_valid, and if there's a valid
2087 * binder_buffer_object at the offset found in index @index
2088 * of the offset array, that object is returned. Otherwise,
2089 * %NULL is returned.
2090 * Note that the offset found in index @index itself is not
2091 * verified; this function assumes that @num_valid elements
2092 * from @start_offset were previously verified to have valid offsets.
David Brazdil0f672f62019-12-10 10:32:29 +00002093 * If @object_offsetp is non-NULL, then the offset within
2094 * @b is written to it.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002095 */
David Brazdil0f672f62019-12-10 10:32:29 +00002096static struct binder_buffer_object *binder_validate_ptr(
2097 struct binder_proc *proc,
2098 struct binder_buffer *b,
2099 struct binder_object *object,
2100 binder_size_t index,
2101 binder_size_t start_offset,
2102 binder_size_t *object_offsetp,
2103 binder_size_t num_valid)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002104{
David Brazdil0f672f62019-12-10 10:32:29 +00002105 size_t object_size;
2106 binder_size_t object_offset;
2107 unsigned long buffer_offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002108
2109 if (index >= num_valid)
2110 return NULL;
2111
David Brazdil0f672f62019-12-10 10:32:29 +00002112 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2113 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2114 b, buffer_offset,
2115 sizeof(object_offset)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002116 return NULL;
Olivier Deprez92d4c212022-12-06 15:05:30 +01002117 object_size = binder_get_object(proc, NULL, b, object_offset, object);
David Brazdil0f672f62019-12-10 10:32:29 +00002118 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2119 return NULL;
2120 if (object_offsetp)
2121 *object_offsetp = object_offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002122
David Brazdil0f672f62019-12-10 10:32:29 +00002123 return &object->bbo;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002124}
2125
2126/**
2127 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
David Brazdil0f672f62019-12-10 10:32:29 +00002128 * @proc: binder_proc owning the buffer
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002129 * @b: transaction buffer
David Brazdil0f672f62019-12-10 10:32:29 +00002130 * @objects_start_offset: offset to start of objects buffer
2131 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2132 * @fixup_offset: start offset in @buffer to fix up
2133 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2134 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002135 *
2136 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
2137 * allowed.
2138 *
2139 * For safety reasons, we only allow fixups inside a buffer to happen
2140 * at increasing offsets; additionally, we only allow fixup on the last
2141 * buffer object that was verified, or one of its parents.
2142 *
2143 * Example of what is allowed:
2144 *
2145 * A
2146 * B (parent = A, offset = 0)
2147 * C (parent = A, offset = 16)
2148 * D (parent = C, offset = 0)
2149 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2150 *
2151 * Examples of what is not allowed:
2152 *
2153 * Decreasing offsets within the same parent:
2154 * A
2155 * C (parent = A, offset = 16)
2156 * B (parent = A, offset = 0) // decreasing offset within A
2157 *
2158 * Referring to a parent that wasn't the last object or any of its parents:
2159 * A
2160 * B (parent = A, offset = 0)
2161 * C (parent = A, offset = 0)
2162 * C (parent = A, offset = 16)
2163 * D (parent = B, offset = 0) // B is not A or any of A's parents
2164 */
David Brazdil0f672f62019-12-10 10:32:29 +00002165static bool binder_validate_fixup(struct binder_proc *proc,
2166 struct binder_buffer *b,
2167 binder_size_t objects_start_offset,
2168 binder_size_t buffer_obj_offset,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002169 binder_size_t fixup_offset,
David Brazdil0f672f62019-12-10 10:32:29 +00002170 binder_size_t last_obj_offset,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002171 binder_size_t last_min_offset)
2172{
David Brazdil0f672f62019-12-10 10:32:29 +00002173 if (!last_obj_offset) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002174 /* Nothing to fix up in */
2175 return false;
2176 }
2177
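	/*
	 * Walk up the parent chain from the last verified object until
	 * the object being fixed up is reached (or the chain ends).
	 */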
David Brazdil0f672f62019-12-10 10:32:29 +00002178 while (last_obj_offset != buffer_obj_offset) {
2179 unsigned long buffer_offset;
2180 struct binder_object last_object;
2181 struct binder_buffer_object *last_bbo;
Olivier Deprez92d4c212022-12-06 15:05:30 +01002182 size_t object_size = binder_get_object(proc, NULL, b,
2183 last_obj_offset,
David Brazdil0f672f62019-12-10 10:32:29 +00002184 &last_object);
2185 if (object_size != sizeof(*last_bbo))
2186 return false;
2187
2188 last_bbo = &last_object.bbo;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002189 /*
2190 * Safe to retrieve the parent of last_obj, since it
2191 * was already previously verified by the driver.
2192 */
David Brazdil0f672f62019-12-10 10:32:29 +00002193 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002194 return false;
David Brazdil0f672f62019-12-10 10:32:29 +00002195 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2196 buffer_offset = objects_start_offset +
2197 sizeof(binder_size_t) * last_bbo->parent;
2198 if (binder_alloc_copy_from_buffer(&proc->alloc,
2199 &last_obj_offset,
2200 b, buffer_offset,
2201 sizeof(last_obj_offset)))
2202 return false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002203 }
2204 return (fixup_offset >= last_min_offset);
2205}
2206
David Brazdil0f672f62019-12-10 10:32:29 +00002207/**
2208 * struct binder_task_work_cb - for deferred close
2209 *
2210 * @twork: callback_head for task work
2211 * @file: file to fput() when the task work runs
2212 *
2213 * Structure to pass task work to be handled after
2214 * returning from binder_ioctl() via task_work_add().
2215 */
2216struct binder_task_work_cb {
2217 struct callback_head twork;
2218 struct file *file;
2219};
2220
2221/**
2222 * binder_do_fd_close() - close list of file descriptors
2223 * @twork: callback head for task work
2224 *
2225 * It is not safe to call ksys_close() during the binder_ioctl()
2226 * function if there is a chance that binder's own file descriptor
2227 * might be closed. This is to meet the requirements for using
2228 * fdget() (see comments for __fget_light()). Therefore use
2229 * task_work_add() to schedule the close operation once we have
2230 * returned from binder_ioctl(). This function is a callback
2231 * for that mechanism and does the final fput() on the file
2232 * backing the descriptor that was closed.
2233 */
2234static void binder_do_fd_close(struct callback_head *twork)
2235{
2236 struct binder_task_work_cb *twcb = container_of(twork,
2237 struct binder_task_work_cb, twork);
2238
2239 fput(twcb->file);
2240 kfree(twcb);
2241}
2242
2243/**
2244 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2245 * @fd: file-descriptor to close
2246 *
2247 * See comments in binder_do_fd_close(). This function is used to schedule
2248 * a file-descriptor to be closed after returning from binder_ioctl().
2249 */
2250static void binder_deferred_fd_close(int fd)
2251{
2252 struct binder_task_work_cb *twcb;
2253
2254 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2255 if (!twcb)
2256 return;
2257 init_task_work(&twcb->twork, binder_do_fd_close);
2258 __close_fd_get_file(fd, &twcb->file);
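	/* twcb->file remains NULL if @fd was not a valid open descriptor */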
Olivier Deprez157378f2022-04-04 15:47:50 +02002259 if (twcb->file) {
2260 filp_close(twcb->file, current->files);
2261 task_work_add(current, &twcb->twork, TWA_RESUME);
2262 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00002263 kfree(twcb);
Olivier Deprez157378f2022-04-04 15:47:50 +02002264 }
David Brazdil0f672f62019-12-10 10:32:29 +00002265}
2266
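/*
 * Undo the side effects of translating the objects in @buffer: drop the
 * node and ref counts taken for binder objects and handles and schedule
 * deferred closes for fds installed from fd arrays, stopping at
 * @failed_at when unwinding a partially processed buffer.
 */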
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002267static void binder_transaction_buffer_release(struct binder_proc *proc,
Olivier Deprez157378f2022-04-04 15:47:50 +02002268 struct binder_thread *thread,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002269 struct binder_buffer *buffer,
David Brazdil0f672f62019-12-10 10:32:29 +00002270 binder_size_t failed_at,
2271 bool is_failure)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002272{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002273 int debug_id = buffer->debug_id;
David Brazdil0f672f62019-12-10 10:32:29 +00002274 binder_size_t off_start_offset, buffer_offset, off_end_offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002275
2276 binder_debug(BINDER_DEBUG_TRANSACTION,
David Brazdil0f672f62019-12-10 10:32:29 +00002277 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002278 proc->pid, buffer->debug_id,
David Brazdil0f672f62019-12-10 10:32:29 +00002279 buffer->data_size, buffer->offsets_size,
2280 (unsigned long long)failed_at);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002281
2282 if (buffer->target_node)
2283 binder_dec_node(buffer->target_node, 1, 0);
2284
David Brazdil0f672f62019-12-10 10:32:29 +00002285 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
Olivier Deprez157378f2022-04-04 15:47:50 +02002286 off_end_offset = is_failure && failed_at ? failed_at :
David Brazdil0f672f62019-12-10 10:32:29 +00002287 off_start_offset + buffer->offsets_size;
2288 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2289 buffer_offset += sizeof(binder_size_t)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002290 struct binder_object_header *hdr;
David Brazdil0f672f62019-12-10 10:32:29 +00002291 size_t object_size = 0;
2292 struct binder_object object;
2293 binder_size_t object_offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002294
David Brazdil0f672f62019-12-10 10:32:29 +00002295 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2296 buffer, buffer_offset,
2297 sizeof(object_offset)))
Olivier Deprez92d4c212022-12-06 15:05:30 +01002298 object_size = binder_get_object(proc, NULL, buffer,
David Brazdil0f672f62019-12-10 10:32:29 +00002299 object_offset, &object);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002300 if (object_size == 0) {
2301 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
David Brazdil0f672f62019-12-10 10:32:29 +00002302 debug_id, (u64)object_offset, buffer->data_size);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002303 continue;
2304 }
David Brazdil0f672f62019-12-10 10:32:29 +00002305 hdr = &object.hdr;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002306 switch (hdr->type) {
2307 case BINDER_TYPE_BINDER:
2308 case BINDER_TYPE_WEAK_BINDER: {
2309 struct flat_binder_object *fp;
2310 struct binder_node *node;
2311
2312 fp = to_flat_binder_object(hdr);
2313 node = binder_get_node(proc, fp->binder);
2314 if (node == NULL) {
2315 pr_err("transaction release %d bad node %016llx\n",
2316 debug_id, (u64)fp->binder);
2317 break;
2318 }
2319 binder_debug(BINDER_DEBUG_TRANSACTION,
2320 " node %d u%016llx\n",
2321 node->debug_id, (u64)node->ptr);
2322 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2323 0);
2324 binder_put_node(node);
2325 } break;
2326 case BINDER_TYPE_HANDLE:
2327 case BINDER_TYPE_WEAK_HANDLE: {
2328 struct flat_binder_object *fp;
2329 struct binder_ref_data rdata;
2330 int ret;
2331
2332 fp = to_flat_binder_object(hdr);
2333 ret = binder_dec_ref_for_handle(proc, fp->handle,
2334 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2335
2336 if (ret) {
2337 pr_err("transaction release %d bad handle %d, ret = %d\n",
2338 debug_id, fp->handle, ret);
2339 break;
2340 }
2341 binder_debug(BINDER_DEBUG_TRANSACTION,
2342 " ref %d desc %d\n",
2343 rdata.debug_id, rdata.desc);
2344 } break;
2345
2346 case BINDER_TYPE_FD: {
David Brazdil0f672f62019-12-10 10:32:29 +00002347 /*
2348 * No need to close the file here since user-space
2349 * closes it for successfully delivered
2350 * transactions. For transactions that weren't
2351 * delivered, the new fd was never allocated so
2352 * there is no need to close and the fput on the
2353 * file is done when the transaction is torn
2354 * down.
2355 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002356 } break;
2357 case BINDER_TYPE_PTR:
2358 /*
2359 * Nothing to do here, this will get cleaned up when the
2360 * transaction buffer gets freed
2361 */
2362 break;
2363 case BINDER_TYPE_FDA: {
2364 struct binder_fd_array_object *fda;
2365 struct binder_buffer_object *parent;
David Brazdil0f672f62019-12-10 10:32:29 +00002366 struct binder_object ptr_object;
2367 binder_size_t fda_offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002368 size_t fd_index;
2369 binder_size_t fd_buf_size;
David Brazdil0f672f62019-12-10 10:32:29 +00002370 binder_size_t num_valid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002371
Olivier Deprez157378f2022-04-04 15:47:50 +02002372 if (is_failure) {
David Brazdil0f672f62019-12-10 10:32:29 +00002373 /*
David Brazdil0f672f62019-12-10 10:32:29 +00002374 * The fd fixups have not been applied so no
2375 * fds need to be closed.
2376 */
2377 continue;
2378 }
2379
2380 num_valid = (buffer_offset - off_start_offset) /
2381 sizeof(binder_size_t);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002382 fda = to_binder_fd_array_object(hdr);
David Brazdil0f672f62019-12-10 10:32:29 +00002383 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2384 fda->parent,
2385 off_start_offset,
2386 NULL,
2387 num_valid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002388 if (!parent) {
2389 pr_err("transaction release %d bad parent offset\n",
2390 debug_id);
2391 continue;
2392 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002393 fd_buf_size = sizeof(u32) * fda->num_fds;
2394 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2395 pr_err("transaction release %d invalid number of fds (%lld)\n",
2396 debug_id, (u64)fda->num_fds);
2397 continue;
2398 }
2399 if (fd_buf_size > parent->length ||
2400 fda->parent_offset > parent->length - fd_buf_size) {
2401 /* No space for all file descriptors here. */
2402 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2403 debug_id, (u64)fda->num_fds);
2404 continue;
2405 }
David Brazdil0f672f62019-12-10 10:32:29 +00002406 /*
2407 * the source data for binder_buffer_object is visible
2408 * to user-space and the @buffer element is the user
2409 * pointer to the buffer_object containing the fd_array.
2410 * Convert the address to an offset relative to
2411 * the base of the transaction buffer.
2412 */
2413 fda_offset =
2414 (parent->buffer - (uintptr_t)buffer->user_data) +
2415 fda->parent_offset;
2416 for (fd_index = 0; fd_index < fda->num_fds;
2417 fd_index++) {
2418 u32 fd;
2419 int err;
2420 binder_size_t offset = fda_offset +
2421 fd_index * sizeof(fd);
2422
2423 err = binder_alloc_copy_from_buffer(
2424 &proc->alloc, &fd, buffer,
2425 offset, sizeof(fd));
2426 WARN_ON(err);
Olivier Deprez157378f2022-04-04 15:47:50 +02002427 if (!err) {
David Brazdil0f672f62019-12-10 10:32:29 +00002428 binder_deferred_fd_close(fd);
Olivier Deprez157378f2022-04-04 15:47:50 +02002429 /*
2430 * Need to make sure the thread goes
2431 * back to userspace to complete the
2432 * deferred close
2433 */
2434 if (thread)
2435 thread->looper_need_return = true;
2436 }
David Brazdil0f672f62019-12-10 10:32:29 +00002437 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002438 } break;
2439 default:
2440 pr_err("transaction release %d bad object type %x\n",
2441 debug_id, hdr->type);
2442 break;
2443 }
2444 }
2445}
2446
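/*
 * Translate a local BINDER_TYPE_(WEAK_)BINDER object in the sender's
 * buffer into a (weak) handle that is valid in the target process,
 * creating the node and the target-side ref as needed.
 */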
2447static int binder_translate_binder(struct flat_binder_object *fp,
2448 struct binder_transaction *t,
2449 struct binder_thread *thread)
2450{
2451 struct binder_node *node;
2452 struct binder_proc *proc = thread->proc;
2453 struct binder_proc *target_proc = t->to_proc;
2454 struct binder_ref_data rdata;
2455 int ret = 0;
2456
2457 node = binder_get_node(proc, fp->binder);
2458 if (!node) {
2459 node = binder_new_node(proc, fp);
2460 if (!node)
2461 return -ENOMEM;
2462 }
2463 if (fp->cookie != node->cookie) {
2464 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2465 proc->pid, thread->pid, (u64)fp->binder,
2466 node->debug_id, (u64)fp->cookie,
2467 (u64)node->cookie);
2468 ret = -EINVAL;
2469 goto done;
2470 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002471 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002472 ret = -EPERM;
2473 goto done;
2474 }
2475
2476 ret = binder_inc_ref_for_node(target_proc, node,
2477 fp->hdr.type == BINDER_TYPE_BINDER,
2478 &thread->todo, &rdata);
2479 if (ret)
2480 goto done;
2481
2482 if (fp->hdr.type == BINDER_TYPE_BINDER)
2483 fp->hdr.type = BINDER_TYPE_HANDLE;
2484 else
2485 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2486 fp->binder = 0;
2487 fp->handle = rdata.desc;
2488 fp->cookie = 0;
2489
2490 trace_binder_transaction_node_to_ref(t, node, &rdata);
2491 binder_debug(BINDER_DEBUG_TRANSACTION,
2492 " node %d u%016llx -> ref %d desc %d\n",
2493 node->debug_id, (u64)node->ptr,
2494 rdata.debug_id, rdata.desc);
2495done:
2496 binder_put_node(node);
2497 return ret;
2498}
2499
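/*
 * Translate a BINDER_TYPE_(WEAK_)HANDLE object: a handle that refers
 * back to a node owned by the target is converted into a local binder
 * object, otherwise a ref is created or incremented in the target and
 * the handle is rewritten to the target's descriptor.
 */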
2500static int binder_translate_handle(struct flat_binder_object *fp,
2501 struct binder_transaction *t,
2502 struct binder_thread *thread)
2503{
2504 struct binder_proc *proc = thread->proc;
2505 struct binder_proc *target_proc = t->to_proc;
2506 struct binder_node *node;
2507 struct binder_ref_data src_rdata;
2508 int ret = 0;
2509
2510 node = binder_get_node_from_ref(proc, fp->handle,
2511 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2512 if (!node) {
2513 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2514 proc->pid, thread->pid, fp->handle);
2515 return -EINVAL;
2516 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002517 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002518 ret = -EPERM;
2519 goto done;
2520 }
2521
2522 binder_node_lock(node);
2523 if (node->proc == target_proc) {
2524 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2525 fp->hdr.type = BINDER_TYPE_BINDER;
2526 else
2527 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2528 fp->binder = node->ptr;
2529 fp->cookie = node->cookie;
2530 if (node->proc)
2531 binder_inner_proc_lock(node->proc);
David Brazdil0f672f62019-12-10 10:32:29 +00002532 else
2533 __acquire(&node->proc->inner_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002534 binder_inc_node_nilocked(node,
2535 fp->hdr.type == BINDER_TYPE_BINDER,
2536 0, NULL);
2537 if (node->proc)
2538 binder_inner_proc_unlock(node->proc);
David Brazdil0f672f62019-12-10 10:32:29 +00002539 else
2540 __release(&node->proc->inner_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002541 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2542 binder_debug(BINDER_DEBUG_TRANSACTION,
2543 " ref %d desc %d -> node %d u%016llx\n",
2544 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2545 (u64)node->ptr);
2546 binder_node_unlock(node);
2547 } else {
2548 struct binder_ref_data dest_rdata;
2549
2550 binder_node_unlock(node);
2551 ret = binder_inc_ref_for_node(target_proc, node,
2552 fp->hdr.type == BINDER_TYPE_HANDLE,
2553 NULL, &dest_rdata);
2554 if (ret)
2555 goto done;
2556
2557 fp->binder = 0;
2558 fp->handle = dest_rdata.desc;
2559 fp->cookie = 0;
2560 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2561 &dest_rdata);
2562 binder_debug(BINDER_DEBUG_TRANSACTION,
2563 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2564 src_rdata.debug_id, src_rdata.desc,
2565 dest_rdata.debug_id, dest_rdata.desc,
2566 node->debug_id);
2567 }
2568done:
2569 binder_put_node(node);
2570 return ret;
2571}
2572
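/*
 * Validate an fd passed in a transaction and queue a binder_txn_fd_fixup
 * so that the new fd is installed later, in the target process' context.
 */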
David Brazdil0f672f62019-12-10 10:32:29 +00002573static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002574 struct binder_transaction *t,
2575 struct binder_thread *thread,
2576 struct binder_transaction *in_reply_to)
2577{
2578 struct binder_proc *proc = thread->proc;
2579 struct binder_proc *target_proc = t->to_proc;
David Brazdil0f672f62019-12-10 10:32:29 +00002580 struct binder_txn_fd_fixup *fixup;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002581 struct file *file;
David Brazdil0f672f62019-12-10 10:32:29 +00002582 int ret = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002583 bool target_allows_fd;
2584
2585 if (in_reply_to)
2586 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2587 else
2588 target_allows_fd = t->buffer->target_node->accept_fds;
2589 if (!target_allows_fd) {
2590 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2591 proc->pid, thread->pid,
2592 in_reply_to ? "reply" : "transaction",
2593 fd);
2594 ret = -EPERM;
2595 goto err_fd_not_accepted;
2596 }
2597
2598 file = fget(fd);
2599 if (!file) {
2600 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2601 proc->pid, thread->pid, fd);
2602 ret = -EBADF;
2603 goto err_fget;
2604 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002605 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002606 if (ret < 0) {
2607 ret = -EPERM;
2608 goto err_security;
2609 }
2610
David Brazdil0f672f62019-12-10 10:32:29 +00002611 /*
2612 * Add fixup record for this transaction. The allocation
2613 * of the fd in the target needs to be done from a
2614 * target thread.
2615 */
2616 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2617 if (!fixup) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002618 ret = -ENOMEM;
David Brazdil0f672f62019-12-10 10:32:29 +00002619 goto err_alloc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002620 }
David Brazdil0f672f62019-12-10 10:32:29 +00002621 fixup->file = file;
2622 fixup->offset = fd_offset;
2623 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2624 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002625
David Brazdil0f672f62019-12-10 10:32:29 +00002626 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002627
David Brazdil0f672f62019-12-10 10:32:29 +00002628err_alloc:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002629err_security:
2630 fput(file);
2631err_fget:
2632err_fd_not_accepted:
2633 return ret;
2634}
2635
Olivier Deprez92d4c212022-12-06 15:05:30 +01002636/**
2637 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2638 * @offset: offset in target buffer to fixup
2639 * @skip_size: bytes to skip in copy (fixup will be written later)
2640 * @fixup_data: data to write at fixup offset
2641 * @node: list node
2642 *
2643 * This is used for the pointer fixup list (pf) which is created and consumed
2644 * during binder_transaction() and is only accessed locally. No
2645 * locking is necessary.
2646 *
2647 * The list is ordered by @offset.
2648 */
2649struct binder_ptr_fixup {
2650 binder_size_t offset;
2651 size_t skip_size;
2652 binder_uintptr_t fixup_data;
2653 struct list_head node;
2654};
2655
2656/**
2657 * struct binder_sg_copy - scatter-gather data to be copied
2658 * @offset: offset in target buffer
2659 * @sender_uaddr: user address in source buffer
2660 * @length: bytes to copy
2661 * @node: list node
2662 *
2663 * This is used for the sg copy list (sgc) which is created and consumed
2664 * during binder_transaction() and is only accessed locally. No
2665 * locking is necessary.
2666 *
2667 * The list is ordered by @offset.
2668 */
2669struct binder_sg_copy {
2670 binder_size_t offset;
2671 const void __user *sender_uaddr;
2672 size_t length;
2673 struct list_head node;
2674};
2675
2676/**
2677 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2678 * @alloc: binder_alloc associated with @buffer
2679 * @buffer: binder buffer in target process
2680 * @sgc_head: list_head of scatter-gather copy list
2681 * @pf_head: list_head of pointer fixup list
2682 *
2683 * Processes all elements of @sgc_head, applying fixups from @pf_head
2684 * and copying the scatter-gather data from the source process' user
2685 * buffer to the target's buffer. It is expected that the list creation
2686 * and processing all occurs during binder_transaction() so these lists
2687 * are only accessed in local context.
2688 *
2689 * Return: 0=success, else -errno
2690 */
2691static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2692 struct binder_buffer *buffer,
2693 struct list_head *sgc_head,
2694 struct list_head *pf_head)
2695{
2696 int ret = 0;
2697 struct binder_sg_copy *sgc, *tmpsgc;
2698 struct binder_ptr_fixup *tmppf;
2699 struct binder_ptr_fixup *pf =
2700 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2701 node);
2702
2703 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2704 size_t bytes_copied = 0;
2705
2706 while (bytes_copied < sgc->length) {
2707 size_t copy_size;
2708 size_t bytes_left = sgc->length - bytes_copied;
2709 size_t offset = sgc->offset + bytes_copied;
2710
2711 /*
2712 * We copy up to the fixup (pointed to by pf)
2713 */
2714 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2715 : bytes_left;
2716 if (!ret && copy_size)
2717 ret = binder_alloc_copy_user_to_buffer(
2718 alloc, buffer,
2719 offset,
2720 sgc->sender_uaddr + bytes_copied,
2721 copy_size);
2722 bytes_copied += copy_size;
2723 if (copy_size != bytes_left) {
2724 BUG_ON(!pf);
2725 /* we stopped at a fixup offset */
2726 if (pf->skip_size) {
2727 /*
2728 * we are just skipping. This is for
2729 * BINDER_TYPE_FDA where the translated
2730 * fds will be fixed up when we get
2731 * to target context.
2732 */
2733 bytes_copied += pf->skip_size;
2734 } else {
2735 /* apply the fixup indicated by pf */
2736 if (!ret)
2737 ret = binder_alloc_copy_to_buffer(
2738 alloc, buffer,
2739 pf->offset,
2740 &pf->fixup_data,
2741 sizeof(pf->fixup_data));
2742 bytes_copied += sizeof(pf->fixup_data);
2743 }
2744 list_del(&pf->node);
2745 kfree(pf);
2746 pf = list_first_entry_or_null(pf_head,
2747 struct binder_ptr_fixup, node);
2748 }
2749 }
2750 list_del(&sgc->node);
2751 kfree(sgc);
2752 }
2753 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2754 BUG_ON(pf->skip_size == 0);
2755 list_del(&pf->node);
2756 kfree(pf);
2757 }
2758 BUG_ON(!list_empty(sgc_head));
2759
2760 return ret > 0 ? -EINVAL : ret;
2761}
2762
2763/**
2764 * binder_cleanup_deferred_txn_lists() - free specified lists
2765 * @sgc_head: list_head of scatter-gather copy list
2766 * @pf_head: list_head of pointer fixup list
2767 *
2768 * Called to clean up @sgc_head and @pf_head if there is an
2769 * error.
2770 */
2771static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2772 struct list_head *pf_head)
2773{
2774 struct binder_sg_copy *sgc, *tmpsgc;
2775 struct binder_ptr_fixup *pf, *tmppf;
2776
2777 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2778 list_del(&sgc->node);
2779 kfree(sgc);
2780 }
2781 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2782 list_del(&pf->node);
2783 kfree(pf);
2784 }
2785}
2786
2787/**
2788 * binder_defer_copy() - queue a scatter-gather buffer for copy
2789 * @sgc_head: list_head of scatter-gather copy list
2790 * @offset: binder buffer offset in target process
2791 * @sender_uaddr: user address in source process
2792 * @length: bytes to copy
2793 *
2794 * Specify a scatter-gather block to be copied. The actual copy must
2795 * be deferred until all the needed fixups are identified and queued.
2796 * Then the copy and fixups are done together so un-translated values
2797 * from the source are never visible in the target buffer.
2798 *
2799 * We are guaranteed that repeated calls to this function will have
2800 * monotonically increasing @offset values so the list will naturally
2801 * be ordered.
2802 *
2803 * Return: 0=success, else -errno
2804 */
2805static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2806 const void __user *sender_uaddr, size_t length)
2807{
2808 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2809
2810 if (!bc)
2811 return -ENOMEM;
2812
2813 bc->offset = offset;
2814 bc->sender_uaddr = sender_uaddr;
2815 bc->length = length;
2816 INIT_LIST_HEAD(&bc->node);
2817
2818 /*
2819 * We are guaranteed that the deferred copies are in-order
2820 * so just add to the tail.
2821 */
2822 list_add_tail(&bc->node, sgc_head);
2823
2824 return 0;
2825}
2826
2827/**
2828 * binder_add_fixup() - queue a fixup to be applied to sg copy
2829 * @pf_head: list_head of binder ptr fixup list
2830 * @offset: binder buffer offset in target process
2831 * @fixup: bytes to be copied for fixup
2832 * @skip_size: bytes to skip when copying (fixup will be applied later)
2833 *
2834 * Add the specified fixup to a list ordered by @offset. When copying
2835 * the scatter-gather buffers, the fixup will be copied instead of
2836 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2837 * will be applied later (in target process context), so we just skip
2838 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2839 * value in @fixup.
2840 *
2841 * This function is called *mostly* in @offset order, but there are
2842 * exceptions. Since out-of-order inserts are relatively uncommon,
2843 * we insert the new element by searching backward from the tail of
2844 * the list.
2845 *
2846 * Return: 0=success, else -errno
2847 */
2848static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2849 binder_uintptr_t fixup, size_t skip_size)
2850{
2851 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2852 struct binder_ptr_fixup *tmppf;
2853
2854 if (!pf)
2855 return -ENOMEM;
2856
2857 pf->offset = offset;
2858 pf->fixup_data = fixup;
2859 pf->skip_size = skip_size;
2860 INIT_LIST_HEAD(&pf->node);
2861
2862 /* Fixups are *mostly* added in-order, but there are some
2863 * exceptions. Look backwards through list for insertion point.
2864 */
2865 list_for_each_entry_reverse(tmppf, pf_head, node) {
2866 if (tmppf->offset < pf->offset) {
2867 list_add(&pf->node, &tmppf->node);
2868 return 0;
2869 }
2870 }
2871 /*
2872 * if we get here, then the new offset is the lowest so
2873 * insert at the head
2874 */
2875 list_add(&pf->node, pf_head);
2876 return 0;
2877}
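/*
 * Typical sequence for the helpers above (illustrative sketch only;
 * offsets, lengths and variable names are placeholders): the
 * translation pass queues copies and fixups as it walks the objects,
 * then performs them all at once at the end:
 *
 *	LIST_HEAD(sgc_head);
 *	LIST_HEAD(pf_head);
 *
 *	binder_defer_copy(&sgc_head, buf_offset, sender_uaddr, length);
 *	binder_add_fixup(&pf_head, ptr_offset, translated_ptr, 0);
 *	...
 *	binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
 *				      &sgc_head, &pf_head);
 */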
2878
2879static int binder_translate_fd_array(struct list_head *pf_head,
2880 struct binder_fd_array_object *fda,
2881 const void __user *sender_ubuffer,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002882 struct binder_buffer_object *parent,
Olivier Deprez92d4c212022-12-06 15:05:30 +01002883 struct binder_buffer_object *sender_uparent,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002884 struct binder_transaction *t,
2885 struct binder_thread *thread,
2886 struct binder_transaction *in_reply_to)
2887{
David Brazdil0f672f62019-12-10 10:32:29 +00002888 binder_size_t fdi, fd_buf_size;
2889 binder_size_t fda_offset;
Olivier Deprez92d4c212022-12-06 15:05:30 +01002890 const void __user *sender_ufda_base;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002891 struct binder_proc *proc = thread->proc;
Olivier Deprez92d4c212022-12-06 15:05:30 +01002892 int ret;
2893
2894 if (fda->num_fds == 0)
2895 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002896
2897 fd_buf_size = sizeof(u32) * fda->num_fds;
2898 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2899 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2900 proc->pid, thread->pid, (u64)fda->num_fds);
2901 return -EINVAL;
2902 }
2903 if (fd_buf_size > parent->length ||
2904 fda->parent_offset > parent->length - fd_buf_size) {
2905 /* No space for all file descriptors here. */
2906 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2907 proc->pid, thread->pid, (u64)fda->num_fds);
2908 return -EINVAL;
2909 }
2910 /*
David Brazdil0f672f62019-12-10 10:32:29 +00002911 * the source data for binder_buffer_object is visible
2912 * to user-space and the @buffer element is the user
2913 * pointer to the buffer_object containing the fd_array.
2914 * Convert the address to an offset relative to
2915 * the base of the transaction buffer.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002916 */
David Brazdil0f672f62019-12-10 10:32:29 +00002917 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2918 fda->parent_offset;
Olivier Deprez92d4c212022-12-06 15:05:30 +01002919 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2920 fda->parent_offset;
2921
2922 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2923 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002924 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2925 proc->pid, thread->pid);
2926 return -EINVAL;
2927 }
Olivier Deprez92d4c212022-12-06 15:05:30 +01002928 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2929 if (ret)
2930 return ret;
2931
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002932 for (fdi = 0; fdi < fda->num_fds; fdi++) {
David Brazdil0f672f62019-12-10 10:32:29 +00002933 u32 fd;
David Brazdil0f672f62019-12-10 10:32:29 +00002934 binder_size_t offset = fda_offset + fdi * sizeof(fd);
Olivier Deprez92d4c212022-12-06 15:05:30 +01002935 binder_size_t sender_uoffset = fdi * sizeof(fd);
David Brazdil0f672f62019-12-10 10:32:29 +00002936
Olivier Deprez92d4c212022-12-06 15:05:30 +01002937 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
David Brazdil0f672f62019-12-10 10:32:29 +00002938 if (!ret)
2939 ret = binder_translate_fd(fd, offset, t, thread,
2940 in_reply_to);
Olivier Deprez157378f2022-04-04 15:47:50 +02002941 if (ret)
2942 return ret > 0 ? -EINVAL : ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002943 }
2944 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002945}
2946
Olivier Deprez92d4c212022-12-06 15:05:30 +01002947static int binder_fixup_parent(struct list_head *pf_head,
2948 struct binder_transaction *t,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002949 struct binder_thread *thread,
2950 struct binder_buffer_object *bp,
David Brazdil0f672f62019-12-10 10:32:29 +00002951 binder_size_t off_start_offset,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002952 binder_size_t num_valid,
David Brazdil0f672f62019-12-10 10:32:29 +00002953 binder_size_t last_fixup_obj_off,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002954 binder_size_t last_fixup_min_off)
2955{
2956 struct binder_buffer_object *parent;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002957 struct binder_buffer *b = t->buffer;
2958 struct binder_proc *proc = thread->proc;
2959 struct binder_proc *target_proc = t->to_proc;
David Brazdil0f672f62019-12-10 10:32:29 +00002960 struct binder_object object;
2961 binder_size_t buffer_offset;
2962 binder_size_t parent_offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002963
2964 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2965 return 0;
2966
David Brazdil0f672f62019-12-10 10:32:29 +00002967 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2968 off_start_offset, &parent_offset,
2969 num_valid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002970 if (!parent) {
2971 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2972 proc->pid, thread->pid);
2973 return -EINVAL;
2974 }
2975
David Brazdil0f672f62019-12-10 10:32:29 +00002976 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2977 parent_offset, bp->parent_offset,
2978 last_fixup_obj_off,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002979 last_fixup_min_off)) {
2980 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2981 proc->pid, thread->pid);
2982 return -EINVAL;
2983 }
2984
2985 if (parent->length < sizeof(binder_uintptr_t) ||
2986 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2987 /* No space for a pointer here! */
2988 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2989 proc->pid, thread->pid);
2990 return -EINVAL;
2991 }
David Brazdil0f672f62019-12-10 10:32:29 +00002992 buffer_offset = bp->parent_offset +
2993 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
Olivier Deprez92d4c212022-12-06 15:05:30 +01002994 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002995}
2996
2997/**
2998 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2999 * @t: transaction to send
3000 * @proc: process to send the transaction to
3001 * @thread: thread in @proc to send the transaction to (may be NULL)
3002 *
3003 * This function queues a transaction to the specified process. It will try
3004 * to find a thread in the target process to handle the transaction and
3005 * wake it up. If no thread is found, the work is queued to the proc
3006 * waitqueue.
3007 *
3008 * If the @thread parameter is not NULL, the transaction is always queued
3009 * to the waitlist of that specific thread.
3010 *
3011 * Return: true if the transaction was successfully queued
3012 * false if the target process or thread is dead
3013 */
3014static bool binder_proc_transaction(struct binder_transaction *t,
3015 struct binder_proc *proc,
3016 struct binder_thread *thread)
3017{
3018 struct binder_node *node = t->buffer->target_node;
3019 bool oneway = !!(t->flags & TF_ONE_WAY);
3020 bool pending_async = false;
3021
3022 BUG_ON(!node);
3023 binder_node_lock(node);
3024 if (oneway) {
3025 BUG_ON(thread);
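		/*
		 * Only one async transaction per node is handed to a
		 * thread at a time; the rest are queued on
		 * node->async_todo until the previous async buffer is
		 * freed.
		 */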
Olivier Deprez157378f2022-04-04 15:47:50 +02003026 if (node->has_async_transaction)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003027 pending_async = true;
Olivier Deprez157378f2022-04-04 15:47:50 +02003028 else
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003029 node->has_async_transaction = true;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003030 }
3031
3032 binder_inner_proc_lock(proc);
3033
3034 if (proc->is_dead || (thread && thread->is_dead)) {
3035 binder_inner_proc_unlock(proc);
3036 binder_node_unlock(node);
3037 return false;
3038 }
3039
3040 if (!thread && !pending_async)
3041 thread = binder_select_thread_ilocked(proc);
3042
3043 if (thread)
3044 binder_enqueue_thread_work_ilocked(thread, &t->work);
3045 else if (!pending_async)
3046 binder_enqueue_work_ilocked(&t->work, &proc->todo);
3047 else
3048 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
3049
3050 if (!pending_async)
3051 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
3052
3053 binder_inner_proc_unlock(proc);
3054 binder_node_unlock(node);
3055
3056 return true;
3057}
3058
3059/**
3060 * binder_get_node_refs_for_txn() - Get required refs on node for txn
3061 * @node: struct binder_node for which to get refs
3062 * @procp: returns @node->proc if valid
3063 * @error: if no @proc then returns BR_DEAD_REPLY
3064 *
3065 * User-space normally keeps the node alive when creating a transaction
3066 * since it has a reference to the target. The local strong ref keeps it
3067 * alive if the sending process dies before the target process processes
3068 * the transaction. If the source process is malicious or has a reference
3069 * counting bug, relying on the local strong ref can fail.
3070 *
3071 * Since user-space can cause the local strong ref to go away, we also take
3072 * a tmpref on the node to ensure it survives while we are constructing
3073 * the transaction. We also need a tmpref on the proc while we are
3074 * constructing the transaction, so we take that here as well.
3075 *
3076 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
3077 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
3078 * target proc has died, @error is set to BR_DEAD_REPLY.
3079 */
3080static struct binder_node *binder_get_node_refs_for_txn(
3081 struct binder_node *node,
3082 struct binder_proc **procp,
3083 uint32_t *error)
3084{
3085 struct binder_node *target_node = NULL;
3086
3087 binder_node_inner_lock(node);
3088 if (node->proc) {
3089 target_node = node;
3090 binder_inc_node_nilocked(node, 1, 0, NULL);
3091 binder_inc_node_tmpref_ilocked(node);
3092 node->proc->tmp_ref++;
3093 *procp = node->proc;
3094 } else
3095 *error = BR_DEAD_REPLY;
3096 binder_node_inner_unlock(node);
3097
3098 return target_node;
3099}
3100
3101static void binder_transaction(struct binder_proc *proc,
3102 struct binder_thread *thread,
3103 struct binder_transaction_data *tr, int reply,
3104 binder_size_t extra_buffers_size)
3105{
3106 int ret;
3107 struct binder_transaction *t;
David Brazdil0f672f62019-12-10 10:32:29 +00003108 struct binder_work *w;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003109 struct binder_work *tcomplete;
David Brazdil0f672f62019-12-10 10:32:29 +00003110 binder_size_t buffer_offset = 0;
3111 binder_size_t off_start_offset, off_end_offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003112 binder_size_t off_min;
David Brazdil0f672f62019-12-10 10:32:29 +00003113 binder_size_t sg_buf_offset, sg_buf_end_offset;
Olivier Deprez92d4c212022-12-06 15:05:30 +01003114 binder_size_t user_offset = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003115 struct binder_proc *target_proc = NULL;
3116 struct binder_thread *target_thread = NULL;
3117 struct binder_node *target_node = NULL;
3118 struct binder_transaction *in_reply_to = NULL;
3119 struct binder_transaction_log_entry *e;
3120 uint32_t return_error = 0;
3121 uint32_t return_error_param = 0;
3122 uint32_t return_error_line = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00003123 binder_size_t last_fixup_obj_off = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003124 binder_size_t last_fixup_min_off = 0;
3125 struct binder_context *context = proc->context;
3126 int t_debug_id = atomic_inc_return(&binder_last_id);
David Brazdil0f672f62019-12-10 10:32:29 +00003127 char *secctx = NULL;
3128 u32 secctx_sz = 0;
Olivier Deprez92d4c212022-12-06 15:05:30 +01003129 struct list_head sgc_head;
3130 struct list_head pf_head;
3131 const void __user *user_buffer = (const void __user *)
3132 (uintptr_t)tr->data.ptr.buffer;
3133 INIT_LIST_HEAD(&sgc_head);
3134 INIT_LIST_HEAD(&pf_head);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003135
3136 e = binder_transaction_log_add(&binder_transaction_log);
3137 e->debug_id = t_debug_id;
3138 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3139 e->from_proc = proc->pid;
3140 e->from_thread = thread->pid;
3141 e->target_handle = tr->target.handle;
3142 e->data_size = tr->data_size;
3143 e->offsets_size = tr->offsets_size;
David Brazdil0f672f62019-12-10 10:32:29 +00003144 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003145
3146 if (reply) {
3147 binder_inner_proc_lock(proc);
3148 in_reply_to = thread->transaction_stack;
3149 if (in_reply_to == NULL) {
3150 binder_inner_proc_unlock(proc);
3151 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3152 proc->pid, thread->pid);
3153 return_error = BR_FAILED_REPLY;
3154 return_error_param = -EPROTO;
3155 return_error_line = __LINE__;
3156 goto err_empty_call_stack;
3157 }
3158 if (in_reply_to->to_thread != thread) {
3159 spin_lock(&in_reply_to->lock);
3160 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3161 proc->pid, thread->pid, in_reply_to->debug_id,
3162 in_reply_to->to_proc ?
3163 in_reply_to->to_proc->pid : 0,
3164 in_reply_to->to_thread ?
3165 in_reply_to->to_thread->pid : 0);
3166 spin_unlock(&in_reply_to->lock);
3167 binder_inner_proc_unlock(proc);
3168 return_error = BR_FAILED_REPLY;
3169 return_error_param = -EPROTO;
3170 return_error_line = __LINE__;
3171 in_reply_to = NULL;
3172 goto err_bad_call_stack;
3173 }
3174 thread->transaction_stack = in_reply_to->to_parent;
3175 binder_inner_proc_unlock(proc);
3176 binder_set_nice(in_reply_to->saved_priority);
3177 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3178 if (target_thread == NULL) {
David Brazdil0f672f62019-12-10 10:32:29 +00003179 /* annotation for sparse */
3180 __release(&target_thread->proc->inner_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003181 return_error = BR_DEAD_REPLY;
3182 return_error_line = __LINE__;
3183 goto err_dead_binder;
3184 }
3185 if (target_thread->transaction_stack != in_reply_to) {
3186 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3187 proc->pid, thread->pid,
3188 target_thread->transaction_stack ?
3189 target_thread->transaction_stack->debug_id : 0,
3190 in_reply_to->debug_id);
3191 binder_inner_proc_unlock(target_thread->proc);
3192 return_error = BR_FAILED_REPLY;
3193 return_error_param = -EPROTO;
3194 return_error_line = __LINE__;
3195 in_reply_to = NULL;
3196 target_thread = NULL;
3197 goto err_dead_binder;
3198 }
3199 target_proc = target_thread->proc;
3200 target_proc->tmp_ref++;
3201 binder_inner_proc_unlock(target_thread->proc);
3202 } else {
3203 if (tr->target.handle) {
3204 struct binder_ref *ref;
3205
3206 /*
3207			 * There must already be a strong ref
3208			 * on this node from the handle; take
3209			 * another strong increment so the node
3210			 * stays alive until the transaction
3211			 * is done.
3212 */
3213 binder_proc_lock(proc);
3214 ref = binder_get_ref_olocked(proc, tr->target.handle,
3215 true);
3216 if (ref) {
3217 target_node = binder_get_node_refs_for_txn(
3218 ref->node, &target_proc,
3219 &return_error);
3220 } else {
3221 binder_user_error("%d:%d got transaction to invalid handle\n",
3222 proc->pid, thread->pid);
3223 return_error = BR_FAILED_REPLY;
3224 }
3225 binder_proc_unlock(proc);
3226 } else {
3227 mutex_lock(&context->context_mgr_node_lock);
3228 target_node = context->binder_context_mgr_node;
3229 if (target_node)
3230 target_node = binder_get_node_refs_for_txn(
3231 target_node, &target_proc,
3232 &return_error);
3233 else
3234 return_error = BR_DEAD_REPLY;
3235 mutex_unlock(&context->context_mgr_node_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00003236 if (target_node && target_proc->pid == proc->pid) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003237 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3238 proc->pid, thread->pid);
3239 return_error = BR_FAILED_REPLY;
3240 return_error_param = -EINVAL;
3241 return_error_line = __LINE__;
3242 goto err_invalid_target_handle;
3243 }
3244 }
3245 if (!target_node) {
3246 /*
3247 * return_error is set above
3248 */
3249 return_error_param = -EINVAL;
3250 return_error_line = __LINE__;
3251 goto err_dead_binder;
3252 }
3253 e->to_node = target_node->debug_id;
Olivier Deprez0e641232021-09-23 10:07:05 +02003254 if (WARN_ON(proc == target_proc)) {
3255 return_error = BR_FAILED_REPLY;
3256 return_error_param = -EINVAL;
3257 return_error_line = __LINE__;
3258 goto err_invalid_target_handle;
3259 }
Olivier Deprez157378f2022-04-04 15:47:50 +02003260 if (security_binder_transaction(proc->cred,
3261 target_proc->cred) < 0) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003262 return_error = BR_FAILED_REPLY;
3263 return_error_param = -EPERM;
3264 return_error_line = __LINE__;
3265 goto err_invalid_target_handle;
3266 }
3267 binder_inner_proc_lock(proc);
David Brazdil0f672f62019-12-10 10:32:29 +00003268
3269 w = list_first_entry_or_null(&thread->todo,
3270 struct binder_work, entry);
3271 if (!(tr->flags & TF_ONE_WAY) && w &&
3272 w->type == BINDER_WORK_TRANSACTION) {
3273 /*
3274 * Do not allow new outgoing transaction from a
3275 * thread that has a transaction at the head of
3276 * its todo list. Only need to check the head
3277 * because binder_select_thread_ilocked picks a
3278 * thread from proc->waiting_threads to enqueue
3279 * the transaction, and nothing is queued to the
3280 * todo list while the thread is on waiting_threads.
3281 */
3282 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3283 proc->pid, thread->pid);
3284 binder_inner_proc_unlock(proc);
3285 return_error = BR_FAILED_REPLY;
3286 return_error_param = -EPROTO;
3287 return_error_line = __LINE__;
3288 goto err_bad_todo_list;
3289 }
3290
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003291 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3292 struct binder_transaction *tmp;
3293
3294 tmp = thread->transaction_stack;
3295 if (tmp->to_thread != thread) {
3296 spin_lock(&tmp->lock);
3297 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3298 proc->pid, thread->pid, tmp->debug_id,
3299 tmp->to_proc ? tmp->to_proc->pid : 0,
3300 tmp->to_thread ?
3301 tmp->to_thread->pid : 0);
3302 spin_unlock(&tmp->lock);
3303 binder_inner_proc_unlock(proc);
3304 return_error = BR_FAILED_REPLY;
3305 return_error_param = -EPROTO;
3306 return_error_line = __LINE__;
3307 goto err_bad_call_stack;
3308 }
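		/*
		 * Walk the sender's transaction stack: if some thread in
		 * the target process is already blocked waiting for a
		 * reply from us earlier in this call chain, direct the
		 * new transaction to that thread instead of waking a
		 * fresh one.
		 */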
3309 while (tmp) {
3310 struct binder_thread *from;
3311
3312 spin_lock(&tmp->lock);
3313 from = tmp->from;
3314 if (from && from->proc == target_proc) {
3315 atomic_inc(&from->tmp_ref);
3316 target_thread = from;
3317 spin_unlock(&tmp->lock);
3318 break;
3319 }
3320 spin_unlock(&tmp->lock);
3321 tmp = tmp->from_parent;
3322 }
3323 }
3324 binder_inner_proc_unlock(proc);
3325 }
3326 if (target_thread)
3327 e->to_thread = target_thread->pid;
3328 e->to_proc = target_proc->pid;
3329
3330 /* TODO: reuse incoming transaction for reply */
3331 t = kzalloc(sizeof(*t), GFP_KERNEL);
3332 if (t == NULL) {
3333 return_error = BR_FAILED_REPLY;
3334 return_error_param = -ENOMEM;
3335 return_error_line = __LINE__;
3336 goto err_alloc_t_failed;
3337 }
David Brazdil0f672f62019-12-10 10:32:29 +00003338 INIT_LIST_HEAD(&t->fd_fixups);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003339 binder_stats_created(BINDER_STAT_TRANSACTION);
3340 spin_lock_init(&t->lock);
3341
3342 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3343 if (tcomplete == NULL) {
3344 return_error = BR_FAILED_REPLY;
3345 return_error_param = -ENOMEM;
3346 return_error_line = __LINE__;
3347 goto err_alloc_tcomplete_failed;
3348 }
3349 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3350
3351 t->debug_id = t_debug_id;
3352
3353 if (reply)
3354 binder_debug(BINDER_DEBUG_TRANSACTION,
3355 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3356 proc->pid, thread->pid, t->debug_id,
3357 target_proc->pid, target_thread->pid,
3358 (u64)tr->data.ptr.buffer,
3359 (u64)tr->data.ptr.offsets,
3360 (u64)tr->data_size, (u64)tr->offsets_size,
3361 (u64)extra_buffers_size);
3362 else
3363 binder_debug(BINDER_DEBUG_TRANSACTION,
3364 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3365 proc->pid, thread->pid, t->debug_id,
3366 target_proc->pid, target_node->debug_id,
3367 (u64)tr->data.ptr.buffer,
3368 (u64)tr->data.ptr.offsets,
3369 (u64)tr->data_size, (u64)tr->offsets_size,
3370 (u64)extra_buffers_size);
3371
3372 if (!reply && !(tr->flags & TF_ONE_WAY))
3373 t->from = thread;
3374 else
3375 t->from = NULL;
3376 t->sender_euid = task_euid(proc->tsk);
3377 t->to_proc = target_proc;
3378 t->to_thread = target_thread;
3379 t->code = tr->code;
3380 t->flags = tr->flags;
3381 t->priority = task_nice(current);
3382
David Brazdil0f672f62019-12-10 10:32:29 +00003383 if (target_node && target_node->txn_security_ctx) {
3384 u32 secid;
3385 size_t added_size;
3386
Olivier Deprez157378f2022-04-04 15:47:50 +02003387 security_cred_getsecid(proc->cred, &secid);
David Brazdil0f672f62019-12-10 10:32:29 +00003388 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3389 if (ret) {
3390 return_error = BR_FAILED_REPLY;
3391 return_error_param = ret;
3392 return_error_line = __LINE__;
3393 goto err_get_secctx_failed;
3394 }
3395 added_size = ALIGN(secctx_sz, sizeof(u64));
3396 extra_buffers_size += added_size;
3397 if (extra_buffers_size < added_size) {
3398 /* integer overflow of extra_buffers_size */
3399 return_error = BR_FAILED_REPLY;
3400			return_error_param = -EINVAL;
3401 return_error_line = __LINE__;
3402 goto err_bad_extra_size;
3403 }
3404 }
3405
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003406 trace_binder_transaction(reply, t, target_node);
3407
3408 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3409 tr->offsets_size, extra_buffers_size,
Olivier Deprez157378f2022-04-04 15:47:50 +02003410 !reply && (t->flags & TF_ONE_WAY), current->tgid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003411 if (IS_ERR(t->buffer)) {
3412 /*
3413 * -ESRCH indicates VMA cleared. The target is dying.
3414 */
3415 return_error_param = PTR_ERR(t->buffer);
3416 return_error = return_error_param == -ESRCH ?
3417 BR_DEAD_REPLY : BR_FAILED_REPLY;
3418 return_error_line = __LINE__;
3419 t->buffer = NULL;
3420 goto err_binder_alloc_buf_failed;
3421 }
David Brazdil0f672f62019-12-10 10:32:29 +00003422 if (secctx) {
3423 int err;
3424 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3425 ALIGN(tr->offsets_size, sizeof(void *)) +
3426 ALIGN(extra_buffers_size, sizeof(void *)) -
3427 ALIGN(secctx_sz, sizeof(u64));
3428
3429 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3430 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3431 t->buffer, buf_offset,
3432 secctx, secctx_sz);
3433 if (err) {
3434 t->security_ctx = 0;
3435 WARN_ON(1);
3436 }
3437 security_release_secctx(secctx, secctx_sz);
3438 secctx = NULL;
3439 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003440 t->buffer->debug_id = t->debug_id;
3441 t->buffer->transaction = t;
3442 t->buffer->target_node = target_node;
Olivier Deprez0e641232021-09-23 10:07:05 +02003443 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003444 trace_binder_transaction_alloc_buf(t->buffer);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003445
David Brazdil0f672f62019-12-10 10:32:29 +00003446 if (binder_alloc_copy_user_to_buffer(
3447 &target_proc->alloc,
David Brazdil0f672f62019-12-10 10:32:29 +00003448 t->buffer,
3449 ALIGN(tr->data_size, sizeof(void *)),
3450 (const void __user *)
3451 (uintptr_t)tr->data.ptr.offsets,
3452 tr->offsets_size)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003453 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3454 proc->pid, thread->pid);
3455 return_error = BR_FAILED_REPLY;
3456 return_error_param = -EFAULT;
3457 return_error_line = __LINE__;
3458 goto err_copy_data_failed;
3459 }
3460 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3461 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3462 proc->pid, thread->pid, (u64)tr->offsets_size);
3463 return_error = BR_FAILED_REPLY;
3464 return_error_param = -EINVAL;
3465 return_error_line = __LINE__;
3466 goto err_bad_offset;
3467 }
3468 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3469 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3470 proc->pid, thread->pid,
3471 (u64)extra_buffers_size);
3472 return_error = BR_FAILED_REPLY;
3473 return_error_param = -EINVAL;
3474 return_error_line = __LINE__;
3475 goto err_bad_offset;
3476 }
David Brazdil0f672f62019-12-10 10:32:29 +00003477 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3478 buffer_offset = off_start_offset;
3479 off_end_offset = off_start_offset + tr->offsets_size;
3480 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3481 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3482 ALIGN(secctx_sz, sizeof(u64));
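	/*
	 * Target buffer layout at this point (each region aligned as
	 * computed above): [ data | offsets array | extra sg buffers |
	 * optional security context ]. The loop below walks the offsets
	 * array and translates each flattened object in place.
	 */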
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003483 off_min = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00003484 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3485 buffer_offset += sizeof(binder_size_t)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003486 struct binder_object_header *hdr;
David Brazdil0f672f62019-12-10 10:32:29 +00003487 size_t object_size;
3488 struct binder_object object;
3489 binder_size_t object_offset;
Olivier Deprez92d4c212022-12-06 15:05:30 +01003490 binder_size_t copy_size;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003491
David Brazdil0f672f62019-12-10 10:32:29 +00003492 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3493 &object_offset,
3494 t->buffer,
3495 buffer_offset,
3496 sizeof(object_offset))) {
3497 return_error = BR_FAILED_REPLY;
3498 return_error_param = -EINVAL;
3499 return_error_line = __LINE__;
3500 goto err_bad_offset;
3501 }
Olivier Deprez92d4c212022-12-06 15:05:30 +01003502
3503 /*
3504 * Copy the source user buffer up to the next object
3505 * that will be processed.
3506 */
3507 copy_size = object_offset - user_offset;
3508 if (copy_size && (user_offset > object_offset ||
3509 binder_alloc_copy_user_to_buffer(
3510 &target_proc->alloc,
3511 t->buffer, user_offset,
3512 user_buffer + user_offset,
3513 copy_size))) {
3514 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3515 proc->pid, thread->pid);
3516 return_error = BR_FAILED_REPLY;
3517 return_error_param = -EFAULT;
3518 return_error_line = __LINE__;
3519 goto err_copy_data_failed;
3520 }
3521 object_size = binder_get_object(target_proc, user_buffer,
3522 t->buffer, object_offset, &object);
David Brazdil0f672f62019-12-10 10:32:29 +00003523 if (object_size == 0 || object_offset < off_min) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003524 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
David Brazdil0f672f62019-12-10 10:32:29 +00003525 proc->pid, thread->pid,
3526 (u64)object_offset,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003527 (u64)off_min,
3528 (u64)t->buffer->data_size);
3529 return_error = BR_FAILED_REPLY;
3530 return_error_param = -EINVAL;
3531 return_error_line = __LINE__;
3532 goto err_bad_offset;
3533 }
Olivier Deprez92d4c212022-12-06 15:05:30 +01003534 /*
3535 * Set offset to the next buffer fragment to be
3536 * copied
3537 */
3538 user_offset = object_offset + object_size;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003539
David Brazdil0f672f62019-12-10 10:32:29 +00003540 hdr = &object.hdr;
3541 off_min = object_offset + object_size;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003542 switch (hdr->type) {
3543 case BINDER_TYPE_BINDER:
3544 case BINDER_TYPE_WEAK_BINDER: {
3545 struct flat_binder_object *fp;
3546
3547 fp = to_flat_binder_object(hdr);
3548 ret = binder_translate_binder(fp, t, thread);
David Brazdil0f672f62019-12-10 10:32:29 +00003549
3550 if (ret < 0 ||
3551 binder_alloc_copy_to_buffer(&target_proc->alloc,
3552 t->buffer,
3553 object_offset,
3554 fp, sizeof(*fp))) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003555 return_error = BR_FAILED_REPLY;
3556 return_error_param = ret;
3557 return_error_line = __LINE__;
3558 goto err_translate_failed;
3559 }
3560 } break;
3561 case BINDER_TYPE_HANDLE:
3562 case BINDER_TYPE_WEAK_HANDLE: {
3563 struct flat_binder_object *fp;
3564
3565 fp = to_flat_binder_object(hdr);
3566 ret = binder_translate_handle(fp, t, thread);
David Brazdil0f672f62019-12-10 10:32:29 +00003567 if (ret < 0 ||
3568 binder_alloc_copy_to_buffer(&target_proc->alloc,
3569 t->buffer,
3570 object_offset,
3571 fp, sizeof(*fp))) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003572 return_error = BR_FAILED_REPLY;
3573 return_error_param = ret;
3574 return_error_line = __LINE__;
3575 goto err_translate_failed;
3576 }
3577 } break;
3578
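		/*
		 * For the FD and FDA cases below, no fd is installed in the
		 * target here: binder_translate_fd() records the struct file
		 * in t->fd_fixups, and the fds are allocated later, in the
		 * target's context, by binder_apply_fd_fixups().
		 */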
3579 case BINDER_TYPE_FD: {
3580 struct binder_fd_object *fp = to_binder_fd_object(hdr);
David Brazdil0f672f62019-12-10 10:32:29 +00003581 binder_size_t fd_offset = object_offset +
3582 (uintptr_t)&fp->fd - (uintptr_t)fp;
3583 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3584 thread, in_reply_to);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003585
David Brazdil0f672f62019-12-10 10:32:29 +00003586 fp->pad_binder = 0;
3587 if (ret < 0 ||
3588 binder_alloc_copy_to_buffer(&target_proc->alloc,
3589 t->buffer,
3590 object_offset,
3591 fp, sizeof(*fp))) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003592 return_error = BR_FAILED_REPLY;
David Brazdil0f672f62019-12-10 10:32:29 +00003593 return_error_param = ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003594 return_error_line = __LINE__;
3595 goto err_translate_failed;
3596 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003597 } break;
3598 case BINDER_TYPE_FDA: {
David Brazdil0f672f62019-12-10 10:32:29 +00003599 struct binder_object ptr_object;
3600 binder_size_t parent_offset;
Olivier Deprez92d4c212022-12-06 15:05:30 +01003601 struct binder_object user_object;
3602 size_t user_parent_size;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003603 struct binder_fd_array_object *fda =
3604 to_binder_fd_array_object(hdr);
Olivier Deprez0e641232021-09-23 10:07:05 +02003605 size_t num_valid = (buffer_offset - off_start_offset) /
David Brazdil0f672f62019-12-10 10:32:29 +00003606 sizeof(binder_size_t);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003607 struct binder_buffer_object *parent =
David Brazdil0f672f62019-12-10 10:32:29 +00003608 binder_validate_ptr(target_proc, t->buffer,
3609 &ptr_object, fda->parent,
3610 off_start_offset,
3611 &parent_offset,
3612 num_valid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003613 if (!parent) {
3614 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3615 proc->pid, thread->pid);
3616 return_error = BR_FAILED_REPLY;
3617 return_error_param = -EINVAL;
3618 return_error_line = __LINE__;
3619 goto err_bad_parent;
3620 }
David Brazdil0f672f62019-12-10 10:32:29 +00003621 if (!binder_validate_fixup(target_proc, t->buffer,
3622 off_start_offset,
3623 parent_offset,
3624 fda->parent_offset,
3625 last_fixup_obj_off,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003626 last_fixup_min_off)) {
3627 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3628 proc->pid, thread->pid);
3629 return_error = BR_FAILED_REPLY;
3630 return_error_param = -EINVAL;
3631 return_error_line = __LINE__;
3632 goto err_bad_parent;
3633 }
Olivier Deprez92d4c212022-12-06 15:05:30 +01003634 /*
3635 * We need to read the user version of the parent
3636 * object to get the original user offset
3637 */
3638 user_parent_size =
3639 binder_get_object(proc, user_buffer, t->buffer,
3640 parent_offset, &user_object);
3641 if (user_parent_size != sizeof(user_object.bbo)) {
3642 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3643 proc->pid, thread->pid,
3644 user_parent_size,
3645 sizeof(user_object.bbo));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003646 return_error = BR_FAILED_REPLY;
Olivier Deprez92d4c212022-12-06 15:05:30 +01003647 return_error_param = -EINVAL;
3648 return_error_line = __LINE__;
3649 goto err_bad_parent;
3650 }
3651 ret = binder_translate_fd_array(&pf_head, fda,
3652 user_buffer, parent,
3653 &user_object.bbo, t,
3654 thread, in_reply_to);
3655 if (!ret)
3656 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3657 t->buffer,
3658 object_offset,
3659 fda, sizeof(*fda));
3660 if (ret) {
3661 return_error = BR_FAILED_REPLY;
3662 return_error_param = ret > 0 ? -EINVAL : ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003663 return_error_line = __LINE__;
3664 goto err_translate_failed;
3665 }
David Brazdil0f672f62019-12-10 10:32:29 +00003666 last_fixup_obj_off = parent_offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003667 last_fixup_min_off =
3668 fda->parent_offset + sizeof(u32) * fda->num_fds;
3669 } break;
3670 case BINDER_TYPE_PTR: {
3671 struct binder_buffer_object *bp =
3672 to_binder_buffer_object(hdr);
David Brazdil0f672f62019-12-10 10:32:29 +00003673 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3674 size_t num_valid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003675
3676 if (bp->length > buf_left) {
3677 binder_user_error("%d:%d got transaction with too large buffer\n",
3678 proc->pid, thread->pid);
3679 return_error = BR_FAILED_REPLY;
3680 return_error_param = -EINVAL;
3681 return_error_line = __LINE__;
3682 goto err_bad_offset;
3683 }
Olivier Deprez92d4c212022-12-06 15:05:30 +01003684 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3685 (const void __user *)(uintptr_t)bp->buffer,
3686 bp->length);
3687 if (ret) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003688 return_error = BR_FAILED_REPLY;
Olivier Deprez92d4c212022-12-06 15:05:30 +01003689 return_error_param = ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003690 return_error_line = __LINE__;
Olivier Deprez92d4c212022-12-06 15:05:30 +01003691 goto err_translate_failed;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003692 }
3693 /* Fixup buffer pointer to target proc address space */
David Brazdil0f672f62019-12-10 10:32:29 +00003694 bp->buffer = (uintptr_t)
3695 t->buffer->user_data + sg_buf_offset;
3696 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003697
Olivier Deprez0e641232021-09-23 10:07:05 +02003698 num_valid = (buffer_offset - off_start_offset) /
David Brazdil0f672f62019-12-10 10:32:29 +00003699 sizeof(binder_size_t);
Olivier Deprez92d4c212022-12-06 15:05:30 +01003700 ret = binder_fixup_parent(&pf_head, t,
3701 thread, bp,
David Brazdil0f672f62019-12-10 10:32:29 +00003702 off_start_offset,
3703 num_valid,
3704 last_fixup_obj_off,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003705 last_fixup_min_off);
David Brazdil0f672f62019-12-10 10:32:29 +00003706 if (ret < 0 ||
3707 binder_alloc_copy_to_buffer(&target_proc->alloc,
3708 t->buffer,
3709 object_offset,
3710 bp, sizeof(*bp))) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003711 return_error = BR_FAILED_REPLY;
3712 return_error_param = ret;
3713 return_error_line = __LINE__;
3714 goto err_translate_failed;
3715 }
David Brazdil0f672f62019-12-10 10:32:29 +00003716 last_fixup_obj_off = object_offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003717 last_fixup_min_off = 0;
3718 } break;
3719 default:
3720 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3721 proc->pid, thread->pid, hdr->type);
3722 return_error = BR_FAILED_REPLY;
3723 return_error_param = -EINVAL;
3724 return_error_line = __LINE__;
3725 goto err_bad_object_type;
3726 }
3727 }
Olivier Deprez92d4c212022-12-06 15:05:30 +01003728 /* Done processing objects, copy the rest of the buffer */
3729 if (binder_alloc_copy_user_to_buffer(
3730 &target_proc->alloc,
3731 t->buffer, user_offset,
3732 user_buffer + user_offset,
3733 tr->data_size - user_offset)) {
3734 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3735 proc->pid, thread->pid);
3736 return_error = BR_FAILED_REPLY;
3737 return_error_param = -EFAULT;
3738 return_error_line = __LINE__;
3739 goto err_copy_data_failed;
3740 }
3741
3742 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3743 &sgc_head, &pf_head);
3744 if (ret) {
3745 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3746 proc->pid, thread->pid);
3747 return_error = BR_FAILED_REPLY;
3748 return_error_param = ret;
3749 return_error_line = __LINE__;
3750 goto err_copy_data_failed;
3751 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003752 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3753 t->work.type = BINDER_WORK_TRANSACTION;
3754
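	/*
	 * Three delivery paths from here: a reply is popped off the target
	 * thread's stack and queued directly to it; a synchronous call is
	 * pushed onto the sender's transaction_stack and handed to the
	 * target via binder_proc_transaction(); a one-way call is handed to
	 * binder_proc_transaction() with no specific thread.
	 */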
3755 if (reply) {
3756 binder_enqueue_thread_work(thread, tcomplete);
3757 binder_inner_proc_lock(target_proc);
3758 if (target_thread->is_dead) {
3759 binder_inner_proc_unlock(target_proc);
3760 goto err_dead_proc_or_thread;
3761 }
3762 BUG_ON(t->buffer->async_transaction != 0);
3763 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3764 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3765 binder_inner_proc_unlock(target_proc);
3766 wake_up_interruptible_sync(&target_thread->wait);
3767 binder_free_transaction(in_reply_to);
3768 } else if (!(t->flags & TF_ONE_WAY)) {
3769 BUG_ON(t->buffer->async_transaction != 0);
3770 binder_inner_proc_lock(proc);
3771 /*
3772 * Defer the TRANSACTION_COMPLETE, so we don't return to
3773 * userspace immediately; this allows the target process to
3774 * immediately start processing this transaction, reducing
3775 * latency. We will then return the TRANSACTION_COMPLETE when
3776 * the target replies (or there is an error).
3777 */
3778 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3779 t->need_reply = 1;
3780 t->from_parent = thread->transaction_stack;
3781 thread->transaction_stack = t;
3782 binder_inner_proc_unlock(proc);
3783 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3784 binder_inner_proc_lock(proc);
3785 binder_pop_transaction_ilocked(thread, t);
3786 binder_inner_proc_unlock(proc);
3787 goto err_dead_proc_or_thread;
3788 }
3789 } else {
3790 BUG_ON(target_node == NULL);
3791 BUG_ON(t->buffer->async_transaction != 1);
3792 binder_enqueue_thread_work(thread, tcomplete);
3793 if (!binder_proc_transaction(t, target_proc, NULL))
3794 goto err_dead_proc_or_thread;
3795 }
3796 if (target_thread)
3797 binder_thread_dec_tmpref(target_thread);
3798 binder_proc_dec_tmpref(target_proc);
3799 if (target_node)
3800 binder_dec_node_tmpref(target_node);
3801 /*
3802 * write barrier to synchronize with initialization
3803 * of log entry
3804 */
3805 smp_wmb();
3806 WRITE_ONCE(e->debug_id_done, t_debug_id);
3807 return;
3808
3809err_dead_proc_or_thread:
3810 return_error = BR_DEAD_REPLY;
3811 return_error_line = __LINE__;
3812 binder_dequeue_work(proc, tcomplete);
3813err_translate_failed:
3814err_bad_object_type:
3815err_bad_offset:
3816err_bad_parent:
3817err_copy_data_failed:
Olivier Deprez92d4c212022-12-06 15:05:30 +01003818 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
David Brazdil0f672f62019-12-10 10:32:29 +00003819 binder_free_txn_fixups(t);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003820 trace_binder_transaction_failed_buffer_release(t->buffer);
Olivier Deprez157378f2022-04-04 15:47:50 +02003821 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
David Brazdil0f672f62019-12-10 10:32:29 +00003822 buffer_offset, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003823 if (target_node)
3824 binder_dec_node_tmpref(target_node);
3825 target_node = NULL;
3826 t->buffer->transaction = NULL;
3827 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3828err_binder_alloc_buf_failed:
David Brazdil0f672f62019-12-10 10:32:29 +00003829err_bad_extra_size:
3830 if (secctx)
3831 security_release_secctx(secctx, secctx_sz);
3832err_get_secctx_failed:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003833 kfree(tcomplete);
3834 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3835err_alloc_tcomplete_failed:
3836 kfree(t);
3837 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3838err_alloc_t_failed:
David Brazdil0f672f62019-12-10 10:32:29 +00003839err_bad_todo_list:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003840err_bad_call_stack:
3841err_empty_call_stack:
3842err_dead_binder:
3843err_invalid_target_handle:
3844 if (target_thread)
3845 binder_thread_dec_tmpref(target_thread);
3846 if (target_proc)
3847 binder_proc_dec_tmpref(target_proc);
3848 if (target_node) {
3849 binder_dec_node(target_node, 1, 0);
3850 binder_dec_node_tmpref(target_node);
3851 }
3852
3853 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3854 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3855 proc->pid, thread->pid, return_error, return_error_param,
3856 (u64)tr->data_size, (u64)tr->offsets_size,
3857 return_error_line);
3858
3859 {
3860 struct binder_transaction_log_entry *fe;
3861
3862 e->return_error = return_error;
3863 e->return_error_param = return_error_param;
3864 e->return_error_line = return_error_line;
3865 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3866 *fe = *e;
3867 /*
3868 * write barrier to synchronize with initialization
3869 * of log entry
3870 */
3871 smp_wmb();
3872 WRITE_ONCE(e->debug_id_done, t_debug_id);
3873 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3874 }
3875
3876 BUG_ON(thread->return_error.cmd != BR_OK);
3877 if (in_reply_to) {
3878 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3879 binder_enqueue_thread_work(thread, &thread->return_error.work);
3880 binder_send_failed_reply(in_reply_to, return_error);
3881 } else {
3882 thread->return_error.cmd = return_error;
3883 binder_enqueue_thread_work(thread, &thread->return_error.work);
3884 }
3885}
3886
David Brazdil0f672f62019-12-10 10:32:29 +00003887/**
3888 * binder_free_buf() - free the specified buffer
3889 * @proc:      binder proc that owns buffer
 * @thread:    binder thread performing the buffer release
3890 * @buffer:    buffer to be freed
Olivier Deprez157378f2022-04-04 15:47:50 +02003891 * @is_failure: true if the buffer is being freed for a failed transaction
David Brazdil0f672f62019-12-10 10:32:29 +00003892 *
3893 * If the buffer is for an async transaction, enqueue the next async
3894 * transaction from the node.
3895 *
3896 * Cleanup buffer and free it.
3897 */
3898static void
Olivier Deprez157378f2022-04-04 15:47:50 +02003899binder_free_buf(struct binder_proc *proc,
3900 struct binder_thread *thread,
3901 struct binder_buffer *buffer, bool is_failure)
David Brazdil0f672f62019-12-10 10:32:29 +00003902{
3903 binder_inner_proc_lock(proc);
3904 if (buffer->transaction) {
3905 buffer->transaction->buffer = NULL;
3906 buffer->transaction = NULL;
3907 }
3908 binder_inner_proc_unlock(proc);
3909 if (buffer->async_transaction && buffer->target_node) {
3910 struct binder_node *buf_node;
3911 struct binder_work *w;
3912
3913 buf_node = buffer->target_node;
3914 binder_node_inner_lock(buf_node);
3915 BUG_ON(!buf_node->has_async_transaction);
3916 BUG_ON(buf_node->proc != proc);
3917 w = binder_dequeue_work_head_ilocked(
3918 &buf_node->async_todo);
3919 if (!w) {
3920 buf_node->has_async_transaction = false;
3921 } else {
3922 binder_enqueue_work_ilocked(
3923 w, &proc->todo);
3924 binder_wakeup_proc_ilocked(proc);
3925 }
3926 binder_node_inner_unlock(buf_node);
3927 }
3928 trace_binder_transaction_buffer_release(buffer);
Olivier Deprez157378f2022-04-04 15:47:50 +02003929 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
David Brazdil0f672f62019-12-10 10:32:29 +00003930 binder_alloc_free_buf(&proc->alloc, buffer);
3931}
3932
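/*
 * binder_thread_write() consumes a stream of BC_* commands from the
 * write buffer handed in through the BINDER_WRITE_READ ioctl: each
 * command is a 32-bit opcode, optionally followed by a fixed-size
 * payload, and *consumed is advanced as commands are processed.
 * A rough userspace sketch of issuing a single BC_TRANSACTION
 * (illustrative only; binder_fd is assumed to be an fd from opening a
 * binder device, payload setup and error handling are omitted):
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_transaction_data txn;
 *	} __attribute__((packed)) writebuf = { .cmd = BC_TRANSACTION };
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(writebuf),
 *		.write_buffer = (uintptr_t)&writebuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */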
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003933static int binder_thread_write(struct binder_proc *proc,
3934 struct binder_thread *thread,
3935 binder_uintptr_t binder_buffer, size_t size,
3936 binder_size_t *consumed)
3937{
3938 uint32_t cmd;
3939 struct binder_context *context = proc->context;
3940 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3941 void __user *ptr = buffer + *consumed;
3942 void __user *end = buffer + size;
3943
3944 while (ptr < end && thread->return_error.cmd == BR_OK) {
3945 int ret;
3946
3947 if (get_user(cmd, (uint32_t __user *)ptr))
3948 return -EFAULT;
3949 ptr += sizeof(uint32_t);
3950 trace_binder_command(cmd);
3951 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3952 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3953 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3954 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3955 }
3956 switch (cmd) {
3957 case BC_INCREFS:
3958 case BC_ACQUIRE:
3959 case BC_RELEASE:
3960 case BC_DECREFS: {
3961 uint32_t target;
3962 const char *debug_string;
3963 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3964 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3965 struct binder_ref_data rdata;
3966
3967 if (get_user(target, (uint32_t __user *)ptr))
3968 return -EFAULT;
3969
3970 ptr += sizeof(uint32_t);
3971 ret = -1;
3972 if (increment && !target) {
3973 struct binder_node *ctx_mgr_node;
3974 mutex_lock(&context->context_mgr_node_lock);
3975 ctx_mgr_node = context->binder_context_mgr_node;
Olivier Deprez0e641232021-09-23 10:07:05 +02003976 if (ctx_mgr_node) {
3977 if (ctx_mgr_node->proc == proc) {
3978 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3979 proc->pid, thread->pid);
3980 mutex_unlock(&context->context_mgr_node_lock);
3981 return -EINVAL;
3982 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003983 ret = binder_inc_ref_for_node(
3984 proc, ctx_mgr_node,
3985 strong, NULL, &rdata);
Olivier Deprez0e641232021-09-23 10:07:05 +02003986 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003987 mutex_unlock(&context->context_mgr_node_lock);
3988 }
3989 if (ret)
3990 ret = binder_update_ref_for_handle(
3991 proc, target, increment, strong,
3992 &rdata);
3993 if (!ret && rdata.desc != target) {
3994 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3995 proc->pid, thread->pid,
3996 target, rdata.desc);
3997 }
3998 switch (cmd) {
3999 case BC_INCREFS:
4000 debug_string = "IncRefs";
4001 break;
4002 case BC_ACQUIRE:
4003 debug_string = "Acquire";
4004 break;
4005 case BC_RELEASE:
4006 debug_string = "Release";
4007 break;
4008 case BC_DECREFS:
4009 default:
4010 debug_string = "DecRefs";
4011 break;
4012 }
4013 if (ret) {
4014 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4015 proc->pid, thread->pid, debug_string,
4016 strong, target, ret);
4017 break;
4018 }
4019 binder_debug(BINDER_DEBUG_USER_REFS,
4020 "%d:%d %s ref %d desc %d s %d w %d\n",
4021 proc->pid, thread->pid, debug_string,
4022 rdata.debug_id, rdata.desc, rdata.strong,
4023 rdata.weak);
4024 break;
4025 }
4026 case BC_INCREFS_DONE:
4027 case BC_ACQUIRE_DONE: {
4028 binder_uintptr_t node_ptr;
4029 binder_uintptr_t cookie;
4030 struct binder_node *node;
4031 bool free_node;
4032
4033 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4034 return -EFAULT;
4035 ptr += sizeof(binder_uintptr_t);
4036 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4037 return -EFAULT;
4038 ptr += sizeof(binder_uintptr_t);
4039 node = binder_get_node(proc, node_ptr);
4040 if (node == NULL) {
4041 binder_user_error("%d:%d %s u%016llx no match\n",
4042 proc->pid, thread->pid,
4043 cmd == BC_INCREFS_DONE ?
4044 "BC_INCREFS_DONE" :
4045 "BC_ACQUIRE_DONE",
4046 (u64)node_ptr);
4047 break;
4048 }
4049 if (cookie != node->cookie) {
4050 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4051 proc->pid, thread->pid,
4052 cmd == BC_INCREFS_DONE ?
4053 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4054 (u64)node_ptr, node->debug_id,
4055 (u64)cookie, (u64)node->cookie);
4056 binder_put_node(node);
4057 break;
4058 }
4059 binder_node_inner_lock(node);
4060 if (cmd == BC_ACQUIRE_DONE) {
4061 if (node->pending_strong_ref == 0) {
4062 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4063 proc->pid, thread->pid,
4064 node->debug_id);
4065 binder_node_inner_unlock(node);
4066 binder_put_node(node);
4067 break;
4068 }
4069 node->pending_strong_ref = 0;
4070 } else {
4071 if (node->pending_weak_ref == 0) {
4072 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4073 proc->pid, thread->pid,
4074 node->debug_id);
4075 binder_node_inner_unlock(node);
4076 binder_put_node(node);
4077 break;
4078 }
4079 node->pending_weak_ref = 0;
4080 }
4081 free_node = binder_dec_node_nilocked(node,
4082 cmd == BC_ACQUIRE_DONE, 0);
4083 WARN_ON(free_node);
4084 binder_debug(BINDER_DEBUG_USER_REFS,
4085 "%d:%d %s node %d ls %d lw %d tr %d\n",
4086 proc->pid, thread->pid,
4087 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4088 node->debug_id, node->local_strong_refs,
4089 node->local_weak_refs, node->tmp_refs);
4090 binder_node_inner_unlock(node);
4091 binder_put_node(node);
4092 break;
4093 }
4094 case BC_ATTEMPT_ACQUIRE:
4095 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4096 return -EINVAL;
4097 case BC_ACQUIRE_RESULT:
4098 pr_err("BC_ACQUIRE_RESULT not supported\n");
4099 return -EINVAL;
4100
4101 case BC_FREE_BUFFER: {
4102 binder_uintptr_t data_ptr;
4103 struct binder_buffer *buffer;
4104
4105 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4106 return -EFAULT;
4107 ptr += sizeof(binder_uintptr_t);
4108
4109 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4110 data_ptr);
4111 if (IS_ERR_OR_NULL(buffer)) {
4112 if (PTR_ERR(buffer) == -EPERM) {
4113 binder_user_error(
4114 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4115 proc->pid, thread->pid,
4116 (u64)data_ptr);
4117 } else {
4118 binder_user_error(
4119 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4120 proc->pid, thread->pid,
4121 (u64)data_ptr);
4122 }
4123 break;
4124 }
4125 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4126 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4127 proc->pid, thread->pid, (u64)data_ptr,
4128 buffer->debug_id,
4129 buffer->transaction ? "active" : "finished");
Olivier Deprez157378f2022-04-04 15:47:50 +02004130 binder_free_buf(proc, thread, buffer, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004131 break;
4132 }
4133
4134 case BC_TRANSACTION_SG:
4135 case BC_REPLY_SG: {
4136 struct binder_transaction_data_sg tr;
4137
4138 if (copy_from_user(&tr, ptr, sizeof(tr)))
4139 return -EFAULT;
4140 ptr += sizeof(tr);
4141 binder_transaction(proc, thread, &tr.transaction_data,
4142 cmd == BC_REPLY_SG, tr.buffers_size);
4143 break;
4144 }
4145 case BC_TRANSACTION:
4146 case BC_REPLY: {
4147 struct binder_transaction_data tr;
4148
4149 if (copy_from_user(&tr, ptr, sizeof(tr)))
4150 return -EFAULT;
4151 ptr += sizeof(tr);
4152 binder_transaction(proc, thread, &tr,
4153 cmd == BC_REPLY, 0);
4154 break;
4155 }
4156
4157 case BC_REGISTER_LOOPER:
4158 binder_debug(BINDER_DEBUG_THREADS,
4159 "%d:%d BC_REGISTER_LOOPER\n",
4160 proc->pid, thread->pid);
4161 binder_inner_proc_lock(proc);
4162 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4163 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4164 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4165 proc->pid, thread->pid);
4166 } else if (proc->requested_threads == 0) {
4167 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4168 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4169 proc->pid, thread->pid);
4170 } else {
4171 proc->requested_threads--;
4172 proc->requested_threads_started++;
4173 }
4174 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4175 binder_inner_proc_unlock(proc);
4176 break;
4177 case BC_ENTER_LOOPER:
4178 binder_debug(BINDER_DEBUG_THREADS,
4179 "%d:%d BC_ENTER_LOOPER\n",
4180 proc->pid, thread->pid);
4181 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4182 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4183 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4184 proc->pid, thread->pid);
4185 }
4186 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4187 break;
4188 case BC_EXIT_LOOPER:
4189 binder_debug(BINDER_DEBUG_THREADS,
4190 "%d:%d BC_EXIT_LOOPER\n",
4191 proc->pid, thread->pid);
4192 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4193 break;
4194
4195 case BC_REQUEST_DEATH_NOTIFICATION:
4196 case BC_CLEAR_DEATH_NOTIFICATION: {
4197 uint32_t target;
4198 binder_uintptr_t cookie;
4199 struct binder_ref *ref;
4200 struct binder_ref_death *death = NULL;
4201
4202 if (get_user(target, (uint32_t __user *)ptr))
4203 return -EFAULT;
4204 ptr += sizeof(uint32_t);
4205 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4206 return -EFAULT;
4207 ptr += sizeof(binder_uintptr_t);
4208 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4209 /*
4210 * Allocate memory for death notification
4211 * before taking lock
4212 */
4213 death = kzalloc(sizeof(*death), GFP_KERNEL);
4214 if (death == NULL) {
4215 WARN_ON(thread->return_error.cmd !=
4216 BR_OK);
4217 thread->return_error.cmd = BR_ERROR;
4218 binder_enqueue_thread_work(
4219 thread,
4220 &thread->return_error.work);
4221 binder_debug(
4222 BINDER_DEBUG_FAILED_TRANSACTION,
4223 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4224 proc->pid, thread->pid);
4225 break;
4226 }
4227 }
4228 binder_proc_lock(proc);
4229 ref = binder_get_ref_olocked(proc, target, false);
4230 if (ref == NULL) {
4231 binder_user_error("%d:%d %s invalid ref %d\n",
4232 proc->pid, thread->pid,
4233 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4234 "BC_REQUEST_DEATH_NOTIFICATION" :
4235 "BC_CLEAR_DEATH_NOTIFICATION",
4236 target);
4237 binder_proc_unlock(proc);
4238 kfree(death);
4239 break;
4240 }
4241
4242 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4243 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4244 proc->pid, thread->pid,
4245 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4246 "BC_REQUEST_DEATH_NOTIFICATION" :
4247 "BC_CLEAR_DEATH_NOTIFICATION",
4248 (u64)cookie, ref->data.debug_id,
4249 ref->data.desc, ref->data.strong,
4250 ref->data.weak, ref->node->debug_id);
4251
4252 binder_node_lock(ref->node);
4253 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4254 if (ref->death) {
4255 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4256 proc->pid, thread->pid);
4257 binder_node_unlock(ref->node);
4258 binder_proc_unlock(proc);
4259 kfree(death);
4260 break;
4261 }
4262 binder_stats_created(BINDER_STAT_DEATH);
4263 INIT_LIST_HEAD(&death->work.entry);
4264 death->cookie = cookie;
4265 ref->death = death;
4266 if (ref->node->proc == NULL) {
4267 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4268
4269 binder_inner_proc_lock(proc);
4270 binder_enqueue_work_ilocked(
4271 &ref->death->work, &proc->todo);
4272 binder_wakeup_proc_ilocked(proc);
4273 binder_inner_proc_unlock(proc);
4274 }
4275 } else {
4276 if (ref->death == NULL) {
4277 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4278 proc->pid, thread->pid);
4279 binder_node_unlock(ref->node);
4280 binder_proc_unlock(proc);
4281 break;
4282 }
4283 death = ref->death;
4284 if (death->cookie != cookie) {
4285 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4286 proc->pid, thread->pid,
4287 (u64)death->cookie,
4288 (u64)cookie);
4289 binder_node_unlock(ref->node);
4290 binder_proc_unlock(proc);
4291 break;
4292 }
4293 ref->death = NULL;
4294 binder_inner_proc_lock(proc);
4295 if (list_empty(&death->work.entry)) {
4296 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4297 if (thread->looper &
4298 (BINDER_LOOPER_STATE_REGISTERED |
4299 BINDER_LOOPER_STATE_ENTERED))
4300 binder_enqueue_thread_work_ilocked(
4301 thread,
4302 &death->work);
4303 else {
4304 binder_enqueue_work_ilocked(
4305 &death->work,
4306 &proc->todo);
4307 binder_wakeup_proc_ilocked(
4308 proc);
4309 }
4310 } else {
4311 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4312 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4313 }
4314 binder_inner_proc_unlock(proc);
4315 }
4316 binder_node_unlock(ref->node);
4317 binder_proc_unlock(proc);
4318 } break;
4319 case BC_DEAD_BINDER_DONE: {
4320 struct binder_work *w;
4321 binder_uintptr_t cookie;
4322 struct binder_ref_death *death = NULL;
4323
4324 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4325 return -EFAULT;
4326
4327 ptr += sizeof(cookie);
4328 binder_inner_proc_lock(proc);
4329 list_for_each_entry(w, &proc->delivered_death,
4330 entry) {
4331 struct binder_ref_death *tmp_death =
4332 container_of(w,
4333 struct binder_ref_death,
4334 work);
4335
4336 if (tmp_death->cookie == cookie) {
4337 death = tmp_death;
4338 break;
4339 }
4340 }
4341 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4342 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4343 proc->pid, thread->pid, (u64)cookie,
4344 death);
4345 if (death == NULL) {
4346 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4347 proc->pid, thread->pid, (u64)cookie);
4348 binder_inner_proc_unlock(proc);
4349 break;
4350 }
4351 binder_dequeue_work_ilocked(&death->work);
4352 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4353 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4354 if (thread->looper &
4355 (BINDER_LOOPER_STATE_REGISTERED |
4356 BINDER_LOOPER_STATE_ENTERED))
4357 binder_enqueue_thread_work_ilocked(
4358 thread, &death->work);
4359 else {
4360 binder_enqueue_work_ilocked(
4361 &death->work,
4362 &proc->todo);
4363 binder_wakeup_proc_ilocked(proc);
4364 }
4365 }
4366 binder_inner_proc_unlock(proc);
4367 } break;
4368
4369 default:
4370 pr_err("%d:%d unknown command %d\n",
4371 proc->pid, thread->pid, cmd);
4372 return -EINVAL;
4373 }
4374 *consumed = ptr - buffer;
4375 }
4376 return 0;
4377}
4378
4379static void binder_stat_br(struct binder_proc *proc,
4380 struct binder_thread *thread, uint32_t cmd)
4381{
4382 trace_binder_return(cmd);
4383 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4384 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4385 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4386 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4387 }
4388}
4389
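/*
 * binder_put_node_cmd() - write one BR_* node command to the read buffer
 *
 * Emits @cmd followed by @node_ptr and @node_cookie at *@ptrp, advances
 * *@ptrp past the three fields and updates the BR_* statistics.
 * Returns 0 on success or -EFAULT if the user buffer cannot be written.
 */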
4390static int binder_put_node_cmd(struct binder_proc *proc,
4391 struct binder_thread *thread,
4392 void __user **ptrp,
4393 binder_uintptr_t node_ptr,
4394 binder_uintptr_t node_cookie,
4395 int node_debug_id,
4396 uint32_t cmd, const char *cmd_name)
4397{
4398 void __user *ptr = *ptrp;
4399
4400 if (put_user(cmd, (uint32_t __user *)ptr))
4401 return -EFAULT;
4402 ptr += sizeof(uint32_t);
4403
4404 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4405 return -EFAULT;
4406 ptr += sizeof(binder_uintptr_t);
4407
4408 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4409 return -EFAULT;
4410 ptr += sizeof(binder_uintptr_t);
4411
4412 binder_stat_br(proc, thread, cmd);
4413 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4414 proc->pid, thread->pid, cmd_name, node_debug_id,
4415 (u64)node_ptr, (u64)node_cookie);
4416
4417 *ptrp = ptr;
4418 return 0;
4419}
4420
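/*
 * Sleep (freezer-aware) until this thread has work to do. When
 * @do_proc_work is true the thread is also parked on
 * proc->waiting_threads so process-wide work can be handed to it.
 * Returns 0 when work is available or -ERESTARTSYS if interrupted
 * by a signal.
 */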
4421static int binder_wait_for_work(struct binder_thread *thread,
4422 bool do_proc_work)
4423{
4424 DEFINE_WAIT(wait);
4425 struct binder_proc *proc = thread->proc;
4426 int ret = 0;
4427
4428 freezer_do_not_count();
4429 binder_inner_proc_lock(proc);
4430 for (;;) {
4431 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4432 if (binder_has_work_ilocked(thread, do_proc_work))
4433 break;
4434 if (do_proc_work)
4435 list_add(&thread->waiting_thread_node,
4436 &proc->waiting_threads);
4437 binder_inner_proc_unlock(proc);
4438 schedule();
4439 binder_inner_proc_lock(proc);
4440 list_del_init(&thread->waiting_thread_node);
4441 if (signal_pending(current)) {
4442 ret = -ERESTARTSYS;
4443 break;
4444 }
4445 }
4446 finish_wait(&thread->wait, &wait);
4447 binder_inner_proc_unlock(proc);
4448 freezer_count();
4449
4450 return ret;
4451}
4452
David Brazdil0f672f62019-12-10 10:32:29 +00004453/**
4454 * binder_apply_fd_fixups() - finish fd translation
4455 * @proc: binder_proc associated @t->buffer
4456 * @t: binder transaction with list of fd fixups
4457 *
4458 * Now that we are in the context of the transaction target
4459 * process, we can allocate and install fds. Process the
4460 * list of fds to translate and fixup the buffer with the
4461 * new fds.
4462 *
4463 * If we fail to allocate an fd, then free the resources by
4464 * fput'ing files that have not been processed and closing (via
4465 * binder_deferred_fd_close()) any fds that have already been allocated.
4466 */
4467static int binder_apply_fd_fixups(struct binder_proc *proc,
4468 struct binder_transaction *t)
4469{
4470 struct binder_txn_fd_fixup *fixup, *tmp;
4471 int ret = 0;
4472
4473 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4474 int fd = get_unused_fd_flags(O_CLOEXEC);
4475
4476 if (fd < 0) {
4477 binder_debug(BINDER_DEBUG_TRANSACTION,
4478 "failed fd fixup txn %d fd %d\n",
4479 t->debug_id, fd);
4480 ret = -ENOMEM;
4481 break;
4482 }
4483 binder_debug(BINDER_DEBUG_TRANSACTION,
4484 "fd fixup txn %d fd %d\n",
4485 t->debug_id, fd);
4486 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4487 fd_install(fd, fixup->file);
4488 fixup->file = NULL;
4489 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4490 fixup->offset, &fd,
4491 sizeof(u32))) {
4492 ret = -EINVAL;
4493 break;
4494 }
4495 }
4496 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4497 if (fixup->file) {
4498 fput(fixup->file);
4499 } else if (ret) {
4500 u32 fd;
4501 int err;
4502
4503 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4504 t->buffer,
4505 fixup->offset,
4506 sizeof(fd));
4507 WARN_ON(err);
4508 if (!err)
4509 binder_deferred_fd_close(fd);
4510 }
4511 list_del(&fixup->fixup_entry);
4512 kfree(fixup);
4513 }
4514
4515 return ret;
4516}
4517
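/*
 * binder_thread_read() fills the user read buffer with BR_* commands:
 * it writes an initial BR_NOOP, blocks (unless @non_block) until work
 * appears on thread->todo or, for a thread available for process work,
 * proc->todo, then converts each work item into the corresponding
 * return command, copying out a binder_transaction_data(_secctx)
 * descriptor for transactions. *consumed is advanced to reflect what
 * was written.
 */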
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004518static int binder_thread_read(struct binder_proc *proc,
4519 struct binder_thread *thread,
4520 binder_uintptr_t binder_buffer, size_t size,
4521 binder_size_t *consumed, int non_block)
4522{
4523 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4524 void __user *ptr = buffer + *consumed;
4525 void __user *end = buffer + size;
4526
4527 int ret = 0;
4528 int wait_for_proc_work;
4529
4530 if (*consumed == 0) {
4531 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4532 return -EFAULT;
4533 ptr += sizeof(uint32_t);
4534 }
4535
4536retry:
4537 binder_inner_proc_lock(proc);
4538 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4539 binder_inner_proc_unlock(proc);
4540
4541 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4542
4543 trace_binder_wait_for_work(wait_for_proc_work,
4544 !!thread->transaction_stack,
4545 !binder_worklist_empty(proc, &thread->todo));
4546 if (wait_for_proc_work) {
4547 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4548 BINDER_LOOPER_STATE_ENTERED))) {
4549 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4550 proc->pid, thread->pid, thread->looper);
4551 wait_event_interruptible(binder_user_error_wait,
4552 binder_stop_on_user_error < 2);
4553 }
4554 binder_set_nice(proc->default_priority);
4555 }
4556
4557 if (non_block) {
4558 if (!binder_has_work(thread, wait_for_proc_work))
4559 ret = -EAGAIN;
4560 } else {
4561 ret = binder_wait_for_work(thread, wait_for_proc_work);
4562 }
4563
4564 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4565
4566 if (ret)
4567 return ret;
4568
4569 while (1) {
4570 uint32_t cmd;
David Brazdil0f672f62019-12-10 10:32:29 +00004571 struct binder_transaction_data_secctx tr;
4572 struct binder_transaction_data *trd = &tr.transaction_data;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004573 struct binder_work *w = NULL;
4574 struct list_head *list = NULL;
4575 struct binder_transaction *t = NULL;
4576 struct binder_thread *t_from;
David Brazdil0f672f62019-12-10 10:32:29 +00004577 size_t trsize = sizeof(*trd);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004578
4579 binder_inner_proc_lock(proc);
4580 if (!binder_worklist_empty_ilocked(&thread->todo))
4581 list = &thread->todo;
4582 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4583 wait_for_proc_work)
4584 list = &proc->todo;
4585 else {
4586 binder_inner_proc_unlock(proc);
4587
4588			/* no data added (only the initial BR_NOOP, 4 bytes) */
4589 if (ptr - buffer == 4 && !thread->looper_need_return)
4590 goto retry;
4591 break;
4592 }
4593
4594 if (end - ptr < sizeof(tr) + 4) {
4595 binder_inner_proc_unlock(proc);
4596 break;
4597 }
4598 w = binder_dequeue_work_head_ilocked(list);
4599 if (binder_worklist_empty_ilocked(&thread->todo))
4600 thread->process_todo = false;
4601
4602 switch (w->type) {
4603 case BINDER_WORK_TRANSACTION: {
4604 binder_inner_proc_unlock(proc);
4605 t = container_of(w, struct binder_transaction, work);
4606 } break;
4607 case BINDER_WORK_RETURN_ERROR: {
4608 struct binder_error *e = container_of(
4609 w, struct binder_error, work);
4610
4611 WARN_ON(e->cmd == BR_OK);
4612 binder_inner_proc_unlock(proc);
4613 if (put_user(e->cmd, (uint32_t __user *)ptr))
4614 return -EFAULT;
4615 cmd = e->cmd;
4616 e->cmd = BR_OK;
4617 ptr += sizeof(uint32_t);
4618
4619 binder_stat_br(proc, thread, cmd);
4620 } break;
4621 case BINDER_WORK_TRANSACTION_COMPLETE: {
4622 binder_inner_proc_unlock(proc);
4623 cmd = BR_TRANSACTION_COMPLETE;
David Brazdil0f672f62019-12-10 10:32:29 +00004624 kfree(w);
4625 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004626 if (put_user(cmd, (uint32_t __user *)ptr))
4627 return -EFAULT;
4628 ptr += sizeof(uint32_t);
4629
4630 binder_stat_br(proc, thread, cmd);
4631 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4632 "%d:%d BR_TRANSACTION_COMPLETE\n",
4633 proc->pid, thread->pid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004634 } break;
4635 case BINDER_WORK_NODE: {
4636 struct binder_node *node = container_of(w, struct binder_node, work);
4637 int strong, weak;
4638 binder_uintptr_t node_ptr = node->ptr;
4639 binder_uintptr_t node_cookie = node->cookie;
4640 int node_debug_id = node->debug_id;
4641 int has_weak_ref;
4642 int has_strong_ref;
4643 void __user *orig_ptr = ptr;
4644
4645 BUG_ON(proc != node->proc);
4646 strong = node->internal_strong_refs ||
4647 node->local_strong_refs;
4648 weak = !hlist_empty(&node->refs) ||
4649 node->local_weak_refs ||
4650 node->tmp_refs || strong;
4651 has_strong_ref = node->has_strong_ref;
4652 has_weak_ref = node->has_weak_ref;
4653
4654 if (weak && !has_weak_ref) {
4655 node->has_weak_ref = 1;
4656 node->pending_weak_ref = 1;
4657 node->local_weak_refs++;
4658 }
4659 if (strong && !has_strong_ref) {
4660 node->has_strong_ref = 1;
4661 node->pending_strong_ref = 1;
4662 node->local_strong_refs++;
4663 }
4664 if (!strong && has_strong_ref)
4665 node->has_strong_ref = 0;
4666 if (!weak && has_weak_ref)
4667 node->has_weak_ref = 0;
4668 if (!weak && !strong) {
4669 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4670 "%d:%d node %d u%016llx c%016llx deleted\n",
4671 proc->pid, thread->pid,
4672 node_debug_id,
4673 (u64)node_ptr,
4674 (u64)node_cookie);
4675 rb_erase(&node->rb_node, &proc->nodes);
4676 binder_inner_proc_unlock(proc);
4677 binder_node_lock(node);
4678 /*
4679 * Acquire the node lock before freeing the
4680 * node to serialize with other threads that
4681 * may have been holding the node lock while
4682 * decrementing this node (avoids race where
4683 * this thread frees while the other thread
4684 * is unlocking the node after the final
4685 * decrement)
4686 */
4687 binder_node_unlock(node);
4688 binder_free_node(node);
4689 } else
4690 binder_inner_proc_unlock(proc);
4691
4692 if (weak && !has_weak_ref)
4693 ret = binder_put_node_cmd(
4694 proc, thread, &ptr, node_ptr,
4695 node_cookie, node_debug_id,
4696 BR_INCREFS, "BR_INCREFS");
4697 if (!ret && strong && !has_strong_ref)
4698 ret = binder_put_node_cmd(
4699 proc, thread, &ptr, node_ptr,
4700 node_cookie, node_debug_id,
4701 BR_ACQUIRE, "BR_ACQUIRE");
4702 if (!ret && !strong && has_strong_ref)
4703 ret = binder_put_node_cmd(
4704 proc, thread, &ptr, node_ptr,
4705 node_cookie, node_debug_id,
4706 BR_RELEASE, "BR_RELEASE");
4707 if (!ret && !weak && has_weak_ref)
4708 ret = binder_put_node_cmd(
4709 proc, thread, &ptr, node_ptr,
4710 node_cookie, node_debug_id,
4711 BR_DECREFS, "BR_DECREFS");
4712 if (orig_ptr == ptr)
4713 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4714 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4715 proc->pid, thread->pid,
4716 node_debug_id,
4717 (u64)node_ptr,
4718 (u64)node_cookie);
4719 if (ret)
4720 return ret;
4721 } break;
4722 case BINDER_WORK_DEAD_BINDER:
4723 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4724 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4725 struct binder_ref_death *death;
4726 uint32_t cmd;
4727 binder_uintptr_t cookie;
4728
4729 death = container_of(w, struct binder_ref_death, work);
4730 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4731 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4732 else
4733 cmd = BR_DEAD_BINDER;
4734 cookie = death->cookie;
4735
4736 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4737 "%d:%d %s %016llx\n",
4738 proc->pid, thread->pid,
4739 cmd == BR_DEAD_BINDER ?
4740 "BR_DEAD_BINDER" :
4741 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4742 (u64)cookie);
4743 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4744 binder_inner_proc_unlock(proc);
4745 kfree(death);
4746 binder_stats_deleted(BINDER_STAT_DEATH);
4747 } else {
4748 binder_enqueue_work_ilocked(
4749 w, &proc->delivered_death);
4750 binder_inner_proc_unlock(proc);
4751 }
4752 if (put_user(cmd, (uint32_t __user *)ptr))
4753 return -EFAULT;
4754 ptr += sizeof(uint32_t);
4755 if (put_user(cookie,
4756 (binder_uintptr_t __user *)ptr))
4757 return -EFAULT;
4758 ptr += sizeof(binder_uintptr_t);
4759 binder_stat_br(proc, thread, cmd);
4760 if (cmd == BR_DEAD_BINDER)
4761 goto done; /* DEAD_BINDER notifications can cause transactions */
4762 } break;
David Brazdil0f672f62019-12-10 10:32:29 +00004763 default:
4764 binder_inner_proc_unlock(proc);
4765 pr_err("%d:%d: bad work type %d\n",
4766 proc->pid, thread->pid, w->type);
4767 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004768 }
4769
4770 if (!t)
4771 continue;
4772
4773 BUG_ON(t->buffer == NULL);
4774 if (t->buffer->target_node) {
4775 struct binder_node *target_node = t->buffer->target_node;
4776
David Brazdil0f672f62019-12-10 10:32:29 +00004777 trd->target.ptr = target_node->ptr;
4778 trd->cookie = target_node->cookie;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004779 t->saved_priority = task_nice(current);
4780 if (t->priority < target_node->min_priority &&
4781 !(t->flags & TF_ONE_WAY))
4782 binder_set_nice(t->priority);
4783 else if (!(t->flags & TF_ONE_WAY) ||
4784 t->saved_priority > target_node->min_priority)
4785 binder_set_nice(target_node->min_priority);
4786 cmd = BR_TRANSACTION;
4787 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00004788 trd->target.ptr = 0;
4789 trd->cookie = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004790 cmd = BR_REPLY;
4791 }
David Brazdil0f672f62019-12-10 10:32:29 +00004792 trd->code = t->code;
4793 trd->flags = t->flags;
4794 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004795
4796 t_from = binder_get_txn_from(t);
4797 if (t_from) {
4798 struct task_struct *sender = t_from->proc->tsk;
4799
David Brazdil0f672f62019-12-10 10:32:29 +00004800 trd->sender_pid =
4801 task_tgid_nr_ns(sender,
4802 task_active_pid_ns(current));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004803 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00004804 trd->sender_pid = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004805 }
4806
David Brazdil0f672f62019-12-10 10:32:29 +00004807 ret = binder_apply_fd_fixups(proc, t);
4808 if (ret) {
4809 struct binder_buffer *buffer = t->buffer;
4810 bool oneway = !!(t->flags & TF_ONE_WAY);
4811 int tid = t->debug_id;
4812
4813 if (t_from)
4814 binder_thread_dec_tmpref(t_from);
4815 buffer->transaction = NULL;
4816 binder_cleanup_transaction(t, "fd fixups failed",
4817 BR_FAILED_REPLY);
Olivier Deprez157378f2022-04-04 15:47:50 +02004818 binder_free_buf(proc, thread, buffer, true);
David Brazdil0f672f62019-12-10 10:32:29 +00004819 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4820 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4821 proc->pid, thread->pid,
4822 oneway ? "async " :
4823 (cmd == BR_REPLY ? "reply " : ""),
4824 tid, BR_FAILED_REPLY, ret, __LINE__);
4825 if (cmd == BR_REPLY) {
4826 cmd = BR_FAILED_REPLY;
4827 if (put_user(cmd, (uint32_t __user *)ptr))
4828 return -EFAULT;
4829 ptr += sizeof(uint32_t);
4830 binder_stat_br(proc, thread, cmd);
4831 break;
4832 }
4833 continue;
4834 }
4835 trd->data_size = t->buffer->data_size;
4836 trd->offsets_size = t->buffer->offsets_size;
4837 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4838 trd->data.ptr.offsets = trd->data.ptr.buffer +
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004839 ALIGN(t->buffer->data_size,
4840 sizeof(void *));
4841
David Brazdil0f672f62019-12-10 10:32:29 +00004842 tr.secctx = t->security_ctx;
4843 if (t->security_ctx) {
4844 cmd = BR_TRANSACTION_SEC_CTX;
4845 trsize = sizeof(tr);
4846 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004847 if (put_user(cmd, (uint32_t __user *)ptr)) {
4848 if (t_from)
4849 binder_thread_dec_tmpref(t_from);
4850
4851 binder_cleanup_transaction(t, "put_user failed",
4852 BR_FAILED_REPLY);
4853
4854 return -EFAULT;
4855 }
4856 ptr += sizeof(uint32_t);
David Brazdil0f672f62019-12-10 10:32:29 +00004857 if (copy_to_user(ptr, &tr, trsize)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004858 if (t_from)
4859 binder_thread_dec_tmpref(t_from);
4860
4861 binder_cleanup_transaction(t, "copy_to_user failed",
4862 BR_FAILED_REPLY);
4863
4864 return -EFAULT;
4865 }
David Brazdil0f672f62019-12-10 10:32:29 +00004866 ptr += trsize;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004867
4868 trace_binder_transaction_received(t);
4869 binder_stat_br(proc, thread, cmd);
4870 binder_debug(BINDER_DEBUG_TRANSACTION,
4871 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4872 proc->pid, thread->pid,
4873 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
David Brazdil0f672f62019-12-10 10:32:29 +00004874 (cmd == BR_TRANSACTION_SEC_CTX) ?
4875 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004876 t->debug_id, t_from ? t_from->proc->pid : 0,
4877 t_from ? t_from->pid : 0, cmd,
4878 t->buffer->data_size, t->buffer->offsets_size,
David Brazdil0f672f62019-12-10 10:32:29 +00004879 (u64)trd->data.ptr.buffer,
4880 (u64)trd->data.ptr.offsets);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004881
4882 if (t_from)
4883 binder_thread_dec_tmpref(t_from);
4884 t->buffer->allow_user_free = 1;
David Brazdil0f672f62019-12-10 10:32:29 +00004885 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004886 binder_inner_proc_lock(thread->proc);
4887 t->to_parent = thread->transaction_stack;
4888 t->to_thread = thread;
4889 thread->transaction_stack = t;
4890 binder_inner_proc_unlock(thread->proc);
4891 } else {
4892 binder_free_transaction(t);
4893 }
4894 break;
4895 }
4896
4897done:
4898
4899 *consumed = ptr - buffer;
4900 binder_inner_proc_lock(proc);
4901 if (proc->requested_threads == 0 &&
4902 list_empty(&thread->proc->waiting_threads) &&
4903 proc->requested_threads_started < proc->max_threads &&
4904 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
 4905	       BINDER_LOOPER_STATE_ENTERED))
 4906	    /* user-space fails to spawn a new thread if we leave this out */) {
4907 proc->requested_threads++;
4908 binder_inner_proc_unlock(proc);
4909 binder_debug(BINDER_DEBUG_THREADS,
4910 "%d:%d BR_SPAWN_LOOPER\n",
4911 proc->pid, thread->pid);
4912 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4913 return -EFAULT;
4914 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4915 } else
4916 binder_inner_proc_unlock(proc);
4917 return 0;
4918}
4919
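/*
 * Drain @list when its owner (proc or thread) is going away: dequeue each
 * pending binder_work under the inner lock and release whatever it refers
 * to. Undelivered transactions are failed with BR_DEAD_REPLY, completion
 * and death-notification records are freed, undelivered error returns are
 * only logged, and BINDER_WORK_NODE entries are skipped since their nodes
 * are torn down separately.
 */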
4920static void binder_release_work(struct binder_proc *proc,
4921 struct list_head *list)
4922{
4923 struct binder_work *w;
Olivier Deprez0e641232021-09-23 10:07:05 +02004924 enum binder_work_type wtype;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004925
4926 while (1) {
Olivier Deprez0e641232021-09-23 10:07:05 +02004927 binder_inner_proc_lock(proc);
4928 w = binder_dequeue_work_head_ilocked(list);
4929 wtype = w ? w->type : 0;
4930 binder_inner_proc_unlock(proc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004931 if (!w)
4932 return;
4933
Olivier Deprez0e641232021-09-23 10:07:05 +02004934 switch (wtype) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004935 case BINDER_WORK_TRANSACTION: {
4936 struct binder_transaction *t;
4937
4938 t = container_of(w, struct binder_transaction, work);
4939
4940 binder_cleanup_transaction(t, "process died.",
4941 BR_DEAD_REPLY);
4942 } break;
4943 case BINDER_WORK_RETURN_ERROR: {
4944 struct binder_error *e = container_of(
4945 w, struct binder_error, work);
4946
4947 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4948 "undelivered TRANSACTION_ERROR: %u\n",
4949 e->cmd);
4950 } break;
4951 case BINDER_WORK_TRANSACTION_COMPLETE: {
4952 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4953 "undelivered TRANSACTION_COMPLETE\n");
4954 kfree(w);
4955 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4956 } break;
4957 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4958 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4959 struct binder_ref_death *death;
4960
4961 death = container_of(w, struct binder_ref_death, work);
4962 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4963 "undelivered death notification, %016llx\n",
4964 (u64)death->cookie);
4965 kfree(death);
4966 binder_stats_deleted(BINDER_STAT_DEATH);
4967 } break;
Olivier Deprez0e641232021-09-23 10:07:05 +02004968 case BINDER_WORK_NODE:
4969 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004970 default:
4971 pr_err("unexpected work type, %d, not freed\n",
Olivier Deprez0e641232021-09-23 10:07:05 +02004972 wtype);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004973 break;
4974 }
4975 }
4976
4977}
4978
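/*
 * Look up the binder_thread for current in proc->threads (an rbtree keyed
 * by pid). If it is missing and @new_thread was supplied, initialize and
 * insert @new_thread instead. Must be called with proc->inner_lock held.
 */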
4979static struct binder_thread *binder_get_thread_ilocked(
4980 struct binder_proc *proc, struct binder_thread *new_thread)
4981{
4982 struct binder_thread *thread = NULL;
4983 struct rb_node *parent = NULL;
4984 struct rb_node **p = &proc->threads.rb_node;
4985
4986 while (*p) {
4987 parent = *p;
4988 thread = rb_entry(parent, struct binder_thread, rb_node);
4989
4990 if (current->pid < thread->pid)
4991 p = &(*p)->rb_left;
4992 else if (current->pid > thread->pid)
4993 p = &(*p)->rb_right;
4994 else
4995 return thread;
4996 }
4997 if (!new_thread)
4998 return NULL;
4999 thread = new_thread;
5000 binder_stats_created(BINDER_STAT_THREAD);
5001 thread->proc = proc;
5002 thread->pid = current->pid;
5003 atomic_set(&thread->tmp_ref, 0);
5004 init_waitqueue_head(&thread->wait);
5005 INIT_LIST_HEAD(&thread->todo);
5006 rb_link_node(&thread->rb_node, parent, p);
5007 rb_insert_color(&thread->rb_node, &proc->threads);
5008 thread->looper_need_return = true;
5009 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5010 thread->return_error.cmd = BR_OK;
5011 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5012 thread->reply_error.cmd = BR_OK;
5013 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5014 return thread;
5015}
5016
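/*
 * Return the binder_thread for current, creating it on first use. The
 * allocation is done outside the inner lock and inserted under it on a
 * second pass; if the lookup succeeds without using the new allocation,
 * the spare is freed.
 */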
5017static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5018{
5019 struct binder_thread *thread;
5020 struct binder_thread *new_thread;
5021
5022 binder_inner_proc_lock(proc);
5023 thread = binder_get_thread_ilocked(proc, NULL);
5024 binder_inner_proc_unlock(proc);
5025 if (!thread) {
5026 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5027 if (new_thread == NULL)
5028 return NULL;
5029 binder_inner_proc_lock(proc);
5030 thread = binder_get_thread_ilocked(proc, new_thread);
5031 binder_inner_proc_unlock(proc);
5032 if (thread != new_thread)
5033 kfree(new_thread);
5034 }
5035 return thread;
5036}
5037
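/*
 * Final teardown of a binder_proc once the last temporary reference is
 * dropped: release the binder_device (freeing it and the context name if
 * this was the last user), tear down the buffer allocator, and drop the
 * task and credential references before freeing the proc itself.
 */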
5038static void binder_free_proc(struct binder_proc *proc)
5039{
Olivier Deprez0e641232021-09-23 10:07:05 +02005040 struct binder_device *device;
5041
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005042 BUG_ON(!list_empty(&proc->todo));
5043 BUG_ON(!list_empty(&proc->delivered_death));
Olivier Deprez0e641232021-09-23 10:07:05 +02005044 device = container_of(proc->context, struct binder_device, context);
5045 if (refcount_dec_and_test(&device->ref)) {
5046 kfree(proc->context->name);
5047 kfree(device);
5048 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005049 binder_alloc_deferred_release(&proc->alloc);
5050 put_task_struct(proc->tsk);
Olivier Deprez157378f2022-04-04 15:47:50 +02005051 put_cred(proc->cred);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005052 binder_stats_deleted(BINDER_STAT_PROC);
5053 kfree(proc);
5054}
5055
5056static void binder_free_thread(struct binder_thread *thread)
5057{
5058 BUG_ON(!list_empty(&thread->todo));
5059 binder_stats_deleted(BINDER_STAT_THREAD);
5060 binder_proc_dec_tmpref(thread->proc);
5061 kfree(thread);
5062}
5063
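/*
 * Detach @thread from @proc and unwind its transaction stack. Transactions
 * targeted at this thread lose their destination, a reply owed by this
 * thread is failed with BR_DEAD_REPLY, pollers are woken via
 * wake_up_pollfree() (with synchronize_rcu() to avoid use-after-free of
 * the waitqueue), pending work is released, and the number of transactions
 * still active is returned.
 */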
5064static int binder_thread_release(struct binder_proc *proc,
5065 struct binder_thread *thread)
5066{
5067 struct binder_transaction *t;
5068 struct binder_transaction *send_reply = NULL;
5069 int active_transactions = 0;
5070 struct binder_transaction *last_t = NULL;
5071
5072 binder_inner_proc_lock(thread->proc);
5073 /*
5074 * take a ref on the proc so it survives
5075 * after we remove this thread from proc->threads.
5076 * The corresponding dec is when we actually
5077 * free the thread in binder_free_thread()
5078 */
5079 proc->tmp_ref++;
5080 /*
5081 * take a ref on this thread to ensure it
5082 * survives while we are releasing it
5083 */
5084 atomic_inc(&thread->tmp_ref);
5085 rb_erase(&thread->rb_node, &proc->threads);
5086 t = thread->transaction_stack;
5087 if (t) {
5088 spin_lock(&t->lock);
5089 if (t->to_thread == thread)
5090 send_reply = t;
David Brazdil0f672f62019-12-10 10:32:29 +00005091 } else {
5092 __acquire(&t->lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005093 }
5094 thread->is_dead = true;
5095
5096 while (t) {
5097 last_t = t;
5098 active_transactions++;
5099 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5100 "release %d:%d transaction %d %s, still active\n",
5101 proc->pid, thread->pid,
5102 t->debug_id,
5103 (t->to_thread == thread) ? "in" : "out");
5104
5105 if (t->to_thread == thread) {
5106 t->to_proc = NULL;
5107 t->to_thread = NULL;
5108 if (t->buffer) {
5109 t->buffer->transaction = NULL;
5110 t->buffer = NULL;
5111 }
5112 t = t->to_parent;
5113 } else if (t->from == thread) {
5114 t->from = NULL;
5115 t = t->from_parent;
5116 } else
5117 BUG();
5118 spin_unlock(&last_t->lock);
5119 if (t)
5120 spin_lock(&t->lock);
David Brazdil0f672f62019-12-10 10:32:29 +00005121 else
5122 __acquire(&t->lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005123 }
David Brazdil0f672f62019-12-10 10:32:29 +00005124 /* annotation for sparse, lock not acquired in last iteration above */
5125 __release(&t->lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005126
5127 /*
Olivier Deprez157378f2022-04-04 15:47:50 +02005128 * If this thread used poll, make sure we remove the waitqueue from any
5129 * poll data structures holding it.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005130 */
Olivier Deprez157378f2022-04-04 15:47:50 +02005131 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5132 wake_up_pollfree(&thread->wait);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005133
5134 binder_inner_proc_unlock(thread->proc);
5135
5136 /*
Olivier Deprez157378f2022-04-04 15:47:50 +02005137 * This is needed to avoid races between wake_up_pollfree() above and
5138 * someone else removing the last entry from the queue for other reasons
5139 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5140 * descriptor being closed). Such other users hold an RCU read lock, so
5141 * we can be sure they're done after we call synchronize_rcu().
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005142 */
5143 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5144 synchronize_rcu();
5145
5146 if (send_reply)
5147 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5148 binder_release_work(proc, &thread->todo);
5149 binder_thread_dec_tmpref(thread);
5150 return active_transactions;
5151}
5152
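/*
 * poll() support: register the calling thread's waitqueue and report
 * EPOLLIN when the thread already has work queued (or proc work, if the
 * thread is available to handle it).
 */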
5153static __poll_t binder_poll(struct file *filp,
5154 struct poll_table_struct *wait)
5155{
5156 struct binder_proc *proc = filp->private_data;
5157 struct binder_thread *thread = NULL;
5158 bool wait_for_proc_work;
5159
5160 thread = binder_get_thread(proc);
5161 if (!thread)
5162 return POLLERR;
5163
5164 binder_inner_proc_lock(thread->proc);
5165 thread->looper |= BINDER_LOOPER_STATE_POLL;
5166 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5167
5168 binder_inner_proc_unlock(thread->proc);
5169
5170 poll_wait(filp, &thread->wait, wait);
5171
5172 if (binder_has_work(thread, wait_for_proc_work))
5173 return EPOLLIN;
5174
5175 return 0;
5176}
5177
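/*
 * Handler for BINDER_WRITE_READ: copy a struct binder_write_read from user
 * space, consume the write buffer with binder_thread_write(), then fill
 * the read buffer with binder_thread_read() (which may block unless the fd
 * is O_NONBLOCK), and copy the consumed counts back to user space.
 *
 * Illustrative user-space sketch (not part of this file):
 *
 *	struct binder_write_read bwr = {
 *		.write_size   = sizeof(cmds),
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.read_size    = sizeof(readbuf),
 *		.read_buffer  = (binder_uintptr_t)readbuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */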
5178static int binder_ioctl_write_read(struct file *filp,
5179 unsigned int cmd, unsigned long arg,
5180 struct binder_thread *thread)
5181{
5182 int ret = 0;
5183 struct binder_proc *proc = filp->private_data;
5184 unsigned int size = _IOC_SIZE(cmd);
5185 void __user *ubuf = (void __user *)arg;
5186 struct binder_write_read bwr;
5187
5188 if (size != sizeof(struct binder_write_read)) {
5189 ret = -EINVAL;
5190 goto out;
5191 }
5192 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5193 ret = -EFAULT;
5194 goto out;
5195 }
5196 binder_debug(BINDER_DEBUG_READ_WRITE,
5197 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5198 proc->pid, thread->pid,
5199 (u64)bwr.write_size, (u64)bwr.write_buffer,
5200 (u64)bwr.read_size, (u64)bwr.read_buffer);
5201
5202 if (bwr.write_size > 0) {
5203 ret = binder_thread_write(proc, thread,
5204 bwr.write_buffer,
5205 bwr.write_size,
5206 &bwr.write_consumed);
5207 trace_binder_write_done(ret);
5208 if (ret < 0) {
5209 bwr.read_consumed = 0;
5210 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5211 ret = -EFAULT;
5212 goto out;
5213 }
5214 }
5215 if (bwr.read_size > 0) {
5216 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5217 bwr.read_size,
5218 &bwr.read_consumed,
5219 filp->f_flags & O_NONBLOCK);
5220 trace_binder_read_done(ret);
5221 binder_inner_proc_lock(proc);
5222 if (!binder_worklist_empty_ilocked(&proc->todo))
5223 binder_wakeup_proc_ilocked(proc);
5224 binder_inner_proc_unlock(proc);
5225 if (ret < 0) {
5226 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5227 ret = -EFAULT;
5228 goto out;
5229 }
5230 }
5231 binder_debug(BINDER_DEBUG_READ_WRITE,
5232 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5233 proc->pid, thread->pid,
5234 (u64)bwr.write_consumed, (u64)bwr.write_size,
5235 (u64)bwr.read_consumed, (u64)bwr.read_size);
5236 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5237 ret = -EFAULT;
5238 goto out;
5239 }
5240out:
5241 return ret;
5242}
5243
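/*
 * Handler for BINDER_SET_CONTEXT_MGR(_EXT): after a security check against
 * proc->cred and a uid check, install a node owned by the calling process
 * as the context manager for this binder context. @fbo is NULL for the
 * legacy BINDER_SET_CONTEXT_MGR ioctl.
 */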
David Brazdil0f672f62019-12-10 10:32:29 +00005244static int binder_ioctl_set_ctx_mgr(struct file *filp,
5245 struct flat_binder_object *fbo)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005246{
5247 int ret = 0;
5248 struct binder_proc *proc = filp->private_data;
5249 struct binder_context *context = proc->context;
5250 struct binder_node *new_node;
5251 kuid_t curr_euid = current_euid();
5252
5253 mutex_lock(&context->context_mgr_node_lock);
5254 if (context->binder_context_mgr_node) {
5255 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5256 ret = -EBUSY;
5257 goto out;
5258 }
Olivier Deprez157378f2022-04-04 15:47:50 +02005259 ret = security_binder_set_context_mgr(proc->cred);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005260 if (ret < 0)
5261 goto out;
5262 if (uid_valid(context->binder_context_mgr_uid)) {
5263 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5264 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5265 from_kuid(&init_user_ns, curr_euid),
5266 from_kuid(&init_user_ns,
5267 context->binder_context_mgr_uid));
5268 ret = -EPERM;
5269 goto out;
5270 }
5271 } else {
5272 context->binder_context_mgr_uid = curr_euid;
5273 }
David Brazdil0f672f62019-12-10 10:32:29 +00005274 new_node = binder_new_node(proc, fbo);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005275 if (!new_node) {
5276 ret = -ENOMEM;
5277 goto out;
5278 }
5279 binder_node_lock(new_node);
5280 new_node->local_weak_refs++;
5281 new_node->local_strong_refs++;
5282 new_node->has_strong_ref = 1;
5283 new_node->has_weak_ref = 1;
5284 context->binder_context_mgr_node = new_node;
5285 binder_node_unlock(new_node);
5286 binder_put_node(new_node);
5287out:
5288 mutex_unlock(&context->context_mgr_node_lock);
5289 return ret;
5290}
5291
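/*
 * Handler for BINDER_GET_NODE_INFO_FOR_REF: restricted to the context
 * manager; translate @info->handle into a node and report its strong and
 * weak reference counts.
 */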
David Brazdil0f672f62019-12-10 10:32:29 +00005292static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5293 struct binder_node_info_for_ref *info)
5294{
5295 struct binder_node *node;
5296 struct binder_context *context = proc->context;
5297 __u32 handle = info->handle;
5298
5299 if (info->strong_count || info->weak_count || info->reserved1 ||
5300 info->reserved2 || info->reserved3) {
5301 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5302 proc->pid);
5303 return -EINVAL;
5304 }
5305
5306 /* This ioctl may only be used by the context manager */
5307 mutex_lock(&context->context_mgr_node_lock);
5308 if (!context->binder_context_mgr_node ||
5309 context->binder_context_mgr_node->proc != proc) {
5310 mutex_unlock(&context->context_mgr_node_lock);
5311 return -EPERM;
5312 }
5313 mutex_unlock(&context->context_mgr_node_lock);
5314
5315 node = binder_get_node_from_ref(proc, handle, true, NULL);
5316 if (!node)
5317 return -EINVAL;
5318
5319 info->strong_count = node->local_strong_refs +
5320 node->internal_strong_refs;
5321 info->weak_count = node->local_weak_refs;
5322
5323 binder_put_node(node);
5324
5325 return 0;
5326}
5327
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005328static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5329 struct binder_node_debug_info *info)
5330{
5331 struct rb_node *n;
5332 binder_uintptr_t ptr = info->ptr;
5333
5334 memset(info, 0, sizeof(*info));
5335
5336 binder_inner_proc_lock(proc);
5337 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5338 struct binder_node *node = rb_entry(n, struct binder_node,
5339 rb_node);
5340 if (node->ptr > ptr) {
5341 info->ptr = node->ptr;
5342 info->cookie = node->cookie;
5343 info->has_strong_ref = node->has_strong_ref;
5344 info->has_weak_ref = node->has_weak_ref;
5345 break;
5346 }
5347 }
5348 binder_inner_proc_unlock(proc);
5349
5350 return 0;
5351}
5352
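/*
 * Main ioctl dispatcher. Resolves the binder_thread for the caller and
 * handles BINDER_WRITE_READ, thread and context-manager management, and
 * version/node debug queries. Unknown commands return -EINVAL.
 */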
5353static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5354{
5355 int ret;
5356 struct binder_proc *proc = filp->private_data;
5357 struct binder_thread *thread;
5358 unsigned int size = _IOC_SIZE(cmd);
5359 void __user *ubuf = (void __user *)arg;
5360
5361 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5362 proc->pid, current->pid, cmd, arg);*/
5363
5364 binder_selftest_alloc(&proc->alloc);
5365
5366 trace_binder_ioctl(cmd, arg);
5367
5368 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5369 if (ret)
5370 goto err_unlocked;
5371
5372 thread = binder_get_thread(proc);
5373 if (thread == NULL) {
5374 ret = -ENOMEM;
5375 goto err;
5376 }
5377
5378 switch (cmd) {
5379 case BINDER_WRITE_READ:
5380 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5381 if (ret)
5382 goto err;
5383 break;
5384 case BINDER_SET_MAX_THREADS: {
5385 int max_threads;
5386
5387 if (copy_from_user(&max_threads, ubuf,
5388 sizeof(max_threads))) {
5389 ret = -EINVAL;
5390 goto err;
5391 }
5392 binder_inner_proc_lock(proc);
5393 proc->max_threads = max_threads;
5394 binder_inner_proc_unlock(proc);
5395 break;
5396 }
David Brazdil0f672f62019-12-10 10:32:29 +00005397 case BINDER_SET_CONTEXT_MGR_EXT: {
5398 struct flat_binder_object fbo;
5399
5400 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5401 ret = -EINVAL;
5402 goto err;
5403 }
5404 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5405 if (ret)
5406 goto err;
5407 break;
5408 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005409 case BINDER_SET_CONTEXT_MGR:
David Brazdil0f672f62019-12-10 10:32:29 +00005410 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005411 if (ret)
5412 goto err;
5413 break;
5414 case BINDER_THREAD_EXIT:
5415 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5416 proc->pid, thread->pid);
5417 binder_thread_release(proc, thread);
5418 thread = NULL;
5419 break;
5420 case BINDER_VERSION: {
5421 struct binder_version __user *ver = ubuf;
5422
5423 if (size != sizeof(struct binder_version)) {
5424 ret = -EINVAL;
5425 goto err;
5426 }
5427 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5428 &ver->protocol_version)) {
5429 ret = -EINVAL;
5430 goto err;
5431 }
5432 break;
5433 }
David Brazdil0f672f62019-12-10 10:32:29 +00005434 case BINDER_GET_NODE_INFO_FOR_REF: {
5435 struct binder_node_info_for_ref info;
5436
5437 if (copy_from_user(&info, ubuf, sizeof(info))) {
5438 ret = -EFAULT;
5439 goto err;
5440 }
5441
5442 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5443 if (ret < 0)
5444 goto err;
5445
5446 if (copy_to_user(ubuf, &info, sizeof(info))) {
5447 ret = -EFAULT;
5448 goto err;
5449 }
5450
5451 break;
5452 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005453 case BINDER_GET_NODE_DEBUG_INFO: {
5454 struct binder_node_debug_info info;
5455
5456 if (copy_from_user(&info, ubuf, sizeof(info))) {
5457 ret = -EFAULT;
5458 goto err;
5459 }
5460
5461 ret = binder_ioctl_get_node_debug_info(proc, &info);
5462 if (ret < 0)
5463 goto err;
5464
5465 if (copy_to_user(ubuf, &info, sizeof(info))) {
5466 ret = -EFAULT;
5467 goto err;
5468 }
5469 break;
5470 }
5471 default:
5472 ret = -EINVAL;
5473 goto err;
5474 }
5475 ret = 0;
5476err:
5477 if (thread)
5478 thread->looper_need_return = false;
5479 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5480 if (ret && ret != -ERESTARTSYS)
5481 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5482err_unlocked:
5483 trace_binder_ioctl_done(ret);
5484 return ret;
5485}
5486
5487static void binder_vma_open(struct vm_area_struct *vma)
5488{
5489 struct binder_proc *proc = vma->vm_private_data;
5490
5491 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5492 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5493 proc->pid, vma->vm_start, vma->vm_end,
5494 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5495 (unsigned long)pgprot_val(vma->vm_page_prot));
5496}
5497
5498static void binder_vma_close(struct vm_area_struct *vma)
5499{
5500 struct binder_proc *proc = vma->vm_private_data;
5501
5502 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5503 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5504 proc->pid, vma->vm_start, vma->vm_end,
5505 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5506 (unsigned long)pgprot_val(vma->vm_page_prot));
5507 binder_alloc_vma_close(&proc->alloc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005508}
5509
5510static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5511{
5512 return VM_FAULT_SIGBUS;
5513}
5514
5515static const struct vm_operations_struct binder_vm_ops = {
5516 .open = binder_vma_open,
5517 .close = binder_vma_close,
5518 .fault = binder_vm_fault,
5519};
5520
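/*
 * mmap() of a binder fd maps the buffer space used for incoming
 * transaction data. Writable mappings are rejected and the mapping is
 * marked VM_DONTCOPY; the actual setup is delegated to
 * binder_alloc_mmap_handler().
 */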
5521static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5522{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005523 struct binder_proc *proc = filp->private_data;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005524
5525 if (proc->tsk != current->group_leader)
5526 return -EINVAL;
5527
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005528 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5529 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5530 __func__, proc->pid, vma->vm_start, vma->vm_end,
5531 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5532 (unsigned long)pgprot_val(vma->vm_page_prot));
5533
5534 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
Olivier Deprez157378f2022-04-04 15:47:50 +02005535 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5536 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5537 return -EPERM;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005538 }
5539 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5540 vma->vm_flags &= ~VM_MAYWRITE;
5541
5542 vma->vm_ops = &binder_vm_ops;
5543 vma->vm_private_data = proc;
5544
Olivier Deprez157378f2022-04-04 15:47:50 +02005545 return binder_alloc_mmap_handler(&proc->alloc, vma);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005546}
5547
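/*
 * open() of a binder device: allocate and initialize a binder_proc for the
 * opening process, attach it to the device's context (taking a device
 * reference), register it on binder_procs, and create the per-PID debugfs
 * and binderfs log entries on the first open for that PID.
 */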
5548static int binder_open(struct inode *nodp, struct file *filp)
5549{
Olivier Deprez0e641232021-09-23 10:07:05 +02005550 struct binder_proc *proc, *itr;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005551 struct binder_device *binder_dev;
David Brazdil0f672f62019-12-10 10:32:29 +00005552 struct binderfs_info *info;
5553 struct dentry *binder_binderfs_dir_entry_proc = NULL;
Olivier Deprez0e641232021-09-23 10:07:05 +02005554 bool existing_pid = false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005555
5556 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5557 current->group_leader->pid, current->pid);
5558
5559 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5560 if (proc == NULL)
5561 return -ENOMEM;
5562 spin_lock_init(&proc->inner_lock);
5563 spin_lock_init(&proc->outer_lock);
5564 get_task_struct(current->group_leader);
5565 proc->tsk = current->group_leader;
Olivier Deprez157378f2022-04-04 15:47:50 +02005566 proc->cred = get_cred(filp->f_cred);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005567 INIT_LIST_HEAD(&proc->todo);
5568 proc->default_priority = task_nice(current);
David Brazdil0f672f62019-12-10 10:32:29 +00005569 /* binderfs stashes devices in i_private */
5570 if (is_binderfs_device(nodp)) {
5571 binder_dev = nodp->i_private;
5572 info = nodp->i_sb->s_fs_info;
5573 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5574 } else {
5575 binder_dev = container_of(filp->private_data,
5576 struct binder_device, miscdev);
5577 }
Olivier Deprez0e641232021-09-23 10:07:05 +02005578 refcount_inc(&binder_dev->ref);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005579 proc->context = &binder_dev->context;
5580 binder_alloc_init(&proc->alloc);
5581
5582 binder_stats_created(BINDER_STAT_PROC);
5583 proc->pid = current->group_leader->pid;
5584 INIT_LIST_HEAD(&proc->delivered_death);
5585 INIT_LIST_HEAD(&proc->waiting_threads);
5586 filp->private_data = proc;
5587
5588 mutex_lock(&binder_procs_lock);
Olivier Deprez0e641232021-09-23 10:07:05 +02005589 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5590 if (itr->pid == proc->pid) {
5591 existing_pid = true;
5592 break;
5593 }
5594 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005595 hlist_add_head(&proc->proc_node, &binder_procs);
5596 mutex_unlock(&binder_procs_lock);
5597
Olivier Deprez0e641232021-09-23 10:07:05 +02005598 if (binder_debugfs_dir_entry_proc && !existing_pid) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005599 char strbuf[11];
5600
5601 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5602 /*
Olivier Deprez0e641232021-09-23 10:07:05 +02005603 * proc debug entries are shared between contexts.
 5604		 * Only create for the first PID to avoid debugfs log spamming.
 5605		 * The printing code will print all contexts for a given PID
 5606		 * anyway, so this is not a problem.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005607 */
5608 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5609 binder_debugfs_dir_entry_proc,
5610 (void *)(unsigned long)proc->pid,
David Brazdil0f672f62019-12-10 10:32:29 +00005611 &proc_fops);
5612 }
5613
Olivier Deprez0e641232021-09-23 10:07:05 +02005614 if (binder_binderfs_dir_entry_proc && !existing_pid) {
David Brazdil0f672f62019-12-10 10:32:29 +00005615 char strbuf[11];
5616 struct dentry *binderfs_entry;
5617
5618 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5619 /*
5620 * Similar to debugfs, the process specific log file is shared
Olivier Deprez0e641232021-09-23 10:07:05 +02005621 * between contexts. Only create for the first PID.
 5622		 * This is ok since, as with debugfs, the log file will contain
 5623		 * information on all contexts of a given PID.
David Brazdil0f672f62019-12-10 10:32:29 +00005624 */
5625 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5626 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5627 if (!IS_ERR(binderfs_entry)) {
5628 proc->binderfs_entry = binderfs_entry;
5629 } else {
5630 int error;
5631
5632 error = PTR_ERR(binderfs_entry);
Olivier Deprez0e641232021-09-23 10:07:05 +02005633 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5634 strbuf, error);
David Brazdil0f672f62019-12-10 10:32:29 +00005635 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005636 }
5637
5638 return 0;
5639}
5640
5641static int binder_flush(struct file *filp, fl_owner_t id)
5642{
5643 struct binder_proc *proc = filp->private_data;
5644
5645 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5646
5647 return 0;
5648}
5649
5650static void binder_deferred_flush(struct binder_proc *proc)
5651{
5652 struct rb_node *n;
5653 int wake_count = 0;
5654
5655 binder_inner_proc_lock(proc);
5656 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5657 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5658
5659 thread->looper_need_return = true;
5660 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5661 wake_up_interruptible(&thread->wait);
5662 wake_count++;
5663 }
5664 }
5665 binder_inner_proc_unlock(proc);
5666
5667 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5668 "binder_flush: %d woke %d threads\n", proc->pid,
5669 wake_count);
5670}
5671
5672static int binder_release(struct inode *nodp, struct file *filp)
5673{
5674 struct binder_proc *proc = filp->private_data;
5675
5676 debugfs_remove(proc->debugfs_entry);
David Brazdil0f672f62019-12-10 10:32:29 +00005677
5678 if (proc->binderfs_entry) {
5679 binderfs_remove_file(proc->binderfs_entry);
5680 proc->binderfs_entry = NULL;
5681 }
5682
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005683 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5684
5685 return 0;
5686}
5687
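/*
 * Called while releasing the owning proc. If nothing else references the
 * node it is freed immediately; otherwise it is moved to binder_dead_nodes
 * and a BINDER_WORK_DEAD_BINDER item is queued for every ref that
 * requested a death notification. Returns the updated incoming ref count.
 */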
5688static int binder_node_release(struct binder_node *node, int refs)
5689{
5690 struct binder_ref *ref;
5691 int death = 0;
5692 struct binder_proc *proc = node->proc;
5693
5694 binder_release_work(proc, &node->async_todo);
5695
5696 binder_node_lock(node);
5697 binder_inner_proc_lock(proc);
5698 binder_dequeue_work_ilocked(&node->work);
5699 /*
 5700	 * The caller must have taken a temporary ref on the node.
5701 */
5702 BUG_ON(!node->tmp_refs);
5703 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5704 binder_inner_proc_unlock(proc);
5705 binder_node_unlock(node);
5706 binder_free_node(node);
5707
5708 return refs;
5709 }
5710
5711 node->proc = NULL;
5712 node->local_strong_refs = 0;
5713 node->local_weak_refs = 0;
5714 binder_inner_proc_unlock(proc);
5715
5716 spin_lock(&binder_dead_nodes_lock);
5717 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5718 spin_unlock(&binder_dead_nodes_lock);
5719
5720 hlist_for_each_entry(ref, &node->refs, node_entry) {
5721 refs++;
5722 /*
5723 * Need the node lock to synchronize
5724 * with new notification requests and the
5725 * inner lock to synchronize with queued
5726 * death notifications.
5727 */
5728 binder_inner_proc_lock(ref->proc);
5729 if (!ref->death) {
5730 binder_inner_proc_unlock(ref->proc);
5731 continue;
5732 }
5733
5734 death++;
5735
5736 BUG_ON(!list_empty(&ref->death->work.entry));
5737 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5738 binder_enqueue_work_ilocked(&ref->death->work,
5739 &ref->proc->todo);
5740 binder_wakeup_proc_ilocked(ref->proc);
5741 binder_inner_proc_unlock(ref->proc);
5742 }
5743
5744 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5745 "node %d now dead, refs %d, death %d\n",
5746 node->debug_id, refs, death);
5747 binder_node_unlock(node);
5748 binder_put_node(node);
5749
5750 return refs;
5751}
5752
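/*
 * Deferred release of a binder_proc after the fd is closed: unlink it from
 * binder_procs, clear the context manager if this proc owned it, release
 * every thread, node and outgoing ref, fail undelivered work, and finally
 * drop the temporary proc reference that kept the struct alive meanwhile.
 */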
5753static void binder_deferred_release(struct binder_proc *proc)
5754{
5755 struct binder_context *context = proc->context;
5756 struct rb_node *n;
5757 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5758
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005759 mutex_lock(&binder_procs_lock);
5760 hlist_del(&proc->proc_node);
5761 mutex_unlock(&binder_procs_lock);
5762
5763 mutex_lock(&context->context_mgr_node_lock);
5764 if (context->binder_context_mgr_node &&
5765 context->binder_context_mgr_node->proc == proc) {
5766 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5767 "%s: %d context_mgr_node gone\n",
5768 __func__, proc->pid);
5769 context->binder_context_mgr_node = NULL;
5770 }
5771 mutex_unlock(&context->context_mgr_node_lock);
5772 binder_inner_proc_lock(proc);
5773 /*
5774 * Make sure proc stays alive after we
5775 * remove all the threads
5776 */
5777 proc->tmp_ref++;
5778
5779 proc->is_dead = true;
5780 threads = 0;
5781 active_transactions = 0;
5782 while ((n = rb_first(&proc->threads))) {
5783 struct binder_thread *thread;
5784
5785 thread = rb_entry(n, struct binder_thread, rb_node);
5786 binder_inner_proc_unlock(proc);
5787 threads++;
5788 active_transactions += binder_thread_release(proc, thread);
5789 binder_inner_proc_lock(proc);
5790 }
5791
5792 nodes = 0;
5793 incoming_refs = 0;
5794 while ((n = rb_first(&proc->nodes))) {
5795 struct binder_node *node;
5796
5797 node = rb_entry(n, struct binder_node, rb_node);
5798 nodes++;
5799 /*
5800 * take a temporary ref on the node before
5801 * calling binder_node_release() which will either
5802 * kfree() the node or call binder_put_node()
5803 */
5804 binder_inc_node_tmpref_ilocked(node);
5805 rb_erase(&node->rb_node, &proc->nodes);
5806 binder_inner_proc_unlock(proc);
5807 incoming_refs = binder_node_release(node, incoming_refs);
5808 binder_inner_proc_lock(proc);
5809 }
5810 binder_inner_proc_unlock(proc);
5811
5812 outgoing_refs = 0;
5813 binder_proc_lock(proc);
5814 while ((n = rb_first(&proc->refs_by_desc))) {
5815 struct binder_ref *ref;
5816
5817 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5818 outgoing_refs++;
5819 binder_cleanup_ref_olocked(ref);
5820 binder_proc_unlock(proc);
5821 binder_free_ref(ref);
5822 binder_proc_lock(proc);
5823 }
5824 binder_proc_unlock(proc);
5825
5826 binder_release_work(proc, &proc->todo);
5827 binder_release_work(proc, &proc->delivered_death);
5828
5829 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5830 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5831 __func__, proc->pid, threads, nodes, incoming_refs,
5832 outgoing_refs, active_transactions);
5833
5834 binder_proc_dec_tmpref(proc);
5835}
5836
5837static void binder_deferred_func(struct work_struct *work)
5838{
5839 struct binder_proc *proc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005840
5841 int defer;
5842
5843 do {
5844 mutex_lock(&binder_deferred_lock);
5845 if (!hlist_empty(&binder_deferred_list)) {
5846 proc = hlist_entry(binder_deferred_list.first,
5847 struct binder_proc, deferred_work_node);
5848 hlist_del_init(&proc->deferred_work_node);
5849 defer = proc->deferred_work;
5850 proc->deferred_work = 0;
5851 } else {
5852 proc = NULL;
5853 defer = 0;
5854 }
5855 mutex_unlock(&binder_deferred_lock);
5856
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005857 if (defer & BINDER_DEFERRED_FLUSH)
5858 binder_deferred_flush(proc);
5859
5860 if (defer & BINDER_DEFERRED_RELEASE)
5861 binder_deferred_release(proc); /* frees proc */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005862 } while (proc);
5863}
5864static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5865
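/*
 * Record deferred work (flush and/or release) for @proc and schedule the
 * shared workqueue item if the proc is not already queued.
 */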
5866static void
5867binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5868{
5869 mutex_lock(&binder_deferred_lock);
5870 proc->deferred_work |= defer;
5871 if (hlist_unhashed(&proc->deferred_work_node)) {
5872 hlist_add_head(&proc->deferred_work_node,
5873 &binder_deferred_list);
5874 schedule_work(&binder_deferred_work);
5875 }
5876 mutex_unlock(&binder_deferred_lock);
5877}
5878
5879static void print_binder_transaction_ilocked(struct seq_file *m,
5880 struct binder_proc *proc,
5881 const char *prefix,
5882 struct binder_transaction *t)
5883{
5884 struct binder_proc *to_proc;
5885 struct binder_buffer *buffer = t->buffer;
5886
5887 spin_lock(&t->lock);
5888 to_proc = t->to_proc;
5889 seq_printf(m,
5890 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5891 prefix, t->debug_id, t,
5892 t->from ? t->from->proc->pid : 0,
5893 t->from ? t->from->pid : 0,
5894 to_proc ? to_proc->pid : 0,
5895 t->to_thread ? t->to_thread->pid : 0,
5896 t->code, t->flags, t->priority, t->need_reply);
5897 spin_unlock(&t->lock);
5898
5899 if (proc != to_proc) {
5900 /*
5901 * Can only safely deref buffer if we are holding the
5902 * correct proc inner lock for this node
5903 */
5904 seq_puts(m, "\n");
5905 return;
5906 }
5907
5908 if (buffer == NULL) {
5909 seq_puts(m, " buffer free\n");
5910 return;
5911 }
5912 if (buffer->target_node)
5913 seq_printf(m, " node %d", buffer->target_node->debug_id);
5914 seq_printf(m, " size %zd:%zd data %pK\n",
5915 buffer->data_size, buffer->offsets_size,
David Brazdil0f672f62019-12-10 10:32:29 +00005916 buffer->user_data);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005917}
5918
5919static void print_binder_work_ilocked(struct seq_file *m,
5920 struct binder_proc *proc,
5921 const char *prefix,
5922 const char *transaction_prefix,
5923 struct binder_work *w)
5924{
5925 struct binder_node *node;
5926 struct binder_transaction *t;
5927
5928 switch (w->type) {
5929 case BINDER_WORK_TRANSACTION:
5930 t = container_of(w, struct binder_transaction, work);
5931 print_binder_transaction_ilocked(
5932 m, proc, transaction_prefix, t);
5933 break;
5934 case BINDER_WORK_RETURN_ERROR: {
5935 struct binder_error *e = container_of(
5936 w, struct binder_error, work);
5937
5938 seq_printf(m, "%stransaction error: %u\n",
5939 prefix, e->cmd);
5940 } break;
5941 case BINDER_WORK_TRANSACTION_COMPLETE:
5942 seq_printf(m, "%stransaction complete\n", prefix);
5943 break;
5944 case BINDER_WORK_NODE:
5945 node = container_of(w, struct binder_node, work);
5946 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5947 prefix, node->debug_id,
5948 (u64)node->ptr, (u64)node->cookie);
5949 break;
5950 case BINDER_WORK_DEAD_BINDER:
5951 seq_printf(m, "%shas dead binder\n", prefix);
5952 break;
5953 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5954 seq_printf(m, "%shas cleared dead binder\n", prefix);
5955 break;
5956 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5957 seq_printf(m, "%shas cleared death notification\n", prefix);
5958 break;
5959 default:
5960 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5961 break;
5962 }
5963}
5964
5965static void print_binder_thread_ilocked(struct seq_file *m,
5966 struct binder_thread *thread,
5967 int print_always)
5968{
5969 struct binder_transaction *t;
5970 struct binder_work *w;
5971 size_t start_pos = m->count;
5972 size_t header_pos;
5973
5974 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5975 thread->pid, thread->looper,
5976 thread->looper_need_return,
5977 atomic_read(&thread->tmp_ref));
5978 header_pos = m->count;
5979 t = thread->transaction_stack;
5980 while (t) {
5981 if (t->from == thread) {
5982 print_binder_transaction_ilocked(m, thread->proc,
5983 " outgoing transaction", t);
5984 t = t->from_parent;
5985 } else if (t->to_thread == thread) {
5986 print_binder_transaction_ilocked(m, thread->proc,
5987 " incoming transaction", t);
5988 t = t->to_parent;
5989 } else {
5990 print_binder_transaction_ilocked(m, thread->proc,
5991 " bad transaction", t);
5992 t = NULL;
5993 }
5994 }
5995 list_for_each_entry(w, &thread->todo, entry) {
5996 print_binder_work_ilocked(m, thread->proc, " ",
5997 " pending transaction", w);
5998 }
5999 if (!print_always && m->count == header_pos)
6000 m->count = start_pos;
6001}
6002
6003static void print_binder_node_nilocked(struct seq_file *m,
6004 struct binder_node *node)
6005{
6006 struct binder_ref *ref;
6007 struct binder_work *w;
6008 int count;
6009
6010 count = 0;
6011 hlist_for_each_entry(ref, &node->refs, node_entry)
6012 count++;
6013
6014 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6015 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6016 node->has_strong_ref, node->has_weak_ref,
6017 node->local_strong_refs, node->local_weak_refs,
6018 node->internal_strong_refs, count, node->tmp_refs);
6019 if (count) {
6020 seq_puts(m, " proc");
6021 hlist_for_each_entry(ref, &node->refs, node_entry)
6022 seq_printf(m, " %d", ref->proc->pid);
6023 }
6024 seq_puts(m, "\n");
6025 if (node->proc) {
6026 list_for_each_entry(w, &node->async_todo, entry)
6027 print_binder_work_ilocked(m, node->proc, " ",
6028 " pending async transaction", w);
6029 }
6030}
6031
6032static void print_binder_ref_olocked(struct seq_file *m,
6033 struct binder_ref *ref)
6034{
6035 binder_node_lock(ref->node);
6036 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6037 ref->data.debug_id, ref->data.desc,
6038 ref->node->proc ? "" : "dead ",
6039 ref->node->debug_id, ref->data.strong,
6040 ref->data.weak, ref->death);
6041 binder_node_unlock(ref->node);
6042}
6043
6044static void print_binder_proc(struct seq_file *m,
6045 struct binder_proc *proc, int print_all)
6046{
6047 struct binder_work *w;
6048 struct rb_node *n;
6049 size_t start_pos = m->count;
6050 size_t header_pos;
6051 struct binder_node *last_node = NULL;
6052
6053 seq_printf(m, "proc %d\n", proc->pid);
6054 seq_printf(m, "context %s\n", proc->context->name);
6055 header_pos = m->count;
6056
6057 binder_inner_proc_lock(proc);
6058 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6059 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6060 rb_node), print_all);
6061
6062 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6063 struct binder_node *node = rb_entry(n, struct binder_node,
6064 rb_node);
David Brazdil0f672f62019-12-10 10:32:29 +00006065 if (!print_all && !node->has_async_transaction)
6066 continue;
6067
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006068 /*
6069 * take a temporary reference on the node so it
6070 * survives and isn't removed from the tree
6071 * while we print it.
6072 */
6073 binder_inc_node_tmpref_ilocked(node);
6074 /* Need to drop inner lock to take node lock */
6075 binder_inner_proc_unlock(proc);
6076 if (last_node)
6077 binder_put_node(last_node);
6078 binder_node_inner_lock(node);
6079 print_binder_node_nilocked(m, node);
6080 binder_node_inner_unlock(node);
6081 last_node = node;
6082 binder_inner_proc_lock(proc);
6083 }
6084 binder_inner_proc_unlock(proc);
6085 if (last_node)
6086 binder_put_node(last_node);
6087
6088 if (print_all) {
6089 binder_proc_lock(proc);
6090 for (n = rb_first(&proc->refs_by_desc);
6091 n != NULL;
6092 n = rb_next(n))
6093 print_binder_ref_olocked(m, rb_entry(n,
6094 struct binder_ref,
6095 rb_node_desc));
6096 binder_proc_unlock(proc);
6097 }
6098 binder_alloc_print_allocated(m, &proc->alloc);
6099 binder_inner_proc_lock(proc);
6100 list_for_each_entry(w, &proc->todo, entry)
6101 print_binder_work_ilocked(m, proc, " ",
6102 " pending transaction", w);
6103 list_for_each_entry(w, &proc->delivered_death, entry) {
6104 seq_puts(m, " has delivered dead binder\n");
6105 break;
6106 }
6107 binder_inner_proc_unlock(proc);
6108 if (!print_all && m->count == header_pos)
6109 m->count = start_pos;
6110}
6111
6112static const char * const binder_return_strings[] = {
6113 "BR_ERROR",
6114 "BR_OK",
6115 "BR_TRANSACTION",
6116 "BR_REPLY",
6117 "BR_ACQUIRE_RESULT",
6118 "BR_DEAD_REPLY",
6119 "BR_TRANSACTION_COMPLETE",
6120 "BR_INCREFS",
6121 "BR_ACQUIRE",
6122 "BR_RELEASE",
6123 "BR_DECREFS",
6124 "BR_ATTEMPT_ACQUIRE",
6125 "BR_NOOP",
6126 "BR_SPAWN_LOOPER",
6127 "BR_FINISHED",
6128 "BR_DEAD_BINDER",
6129 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6130 "BR_FAILED_REPLY"
6131};
6132
6133static const char * const binder_command_strings[] = {
6134 "BC_TRANSACTION",
6135 "BC_REPLY",
6136 "BC_ACQUIRE_RESULT",
6137 "BC_FREE_BUFFER",
6138 "BC_INCREFS",
6139 "BC_ACQUIRE",
6140 "BC_RELEASE",
6141 "BC_DECREFS",
6142 "BC_INCREFS_DONE",
6143 "BC_ACQUIRE_DONE",
6144 "BC_ATTEMPT_ACQUIRE",
6145 "BC_REGISTER_LOOPER",
6146 "BC_ENTER_LOOPER",
6147 "BC_EXIT_LOOPER",
6148 "BC_REQUEST_DEATH_NOTIFICATION",
6149 "BC_CLEAR_DEATH_NOTIFICATION",
6150 "BC_DEAD_BINDER_DONE",
6151 "BC_TRANSACTION_SG",
6152 "BC_REPLY_SG",
6153};
6154
6155static const char * const binder_objstat_strings[] = {
6156 "proc",
6157 "thread",
6158 "node",
6159 "ref",
6160 "death",
6161 "transaction",
6162 "transaction_complete"
6163};
6164
6165static void print_binder_stats(struct seq_file *m, const char *prefix,
6166 struct binder_stats *stats)
6167{
6168 int i;
6169
6170 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6171 ARRAY_SIZE(binder_command_strings));
6172 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6173 int temp = atomic_read(&stats->bc[i]);
6174
6175 if (temp)
6176 seq_printf(m, "%s%s: %d\n", prefix,
6177 binder_command_strings[i], temp);
6178 }
6179
6180 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6181 ARRAY_SIZE(binder_return_strings));
6182 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6183 int temp = atomic_read(&stats->br[i]);
6184
6185 if (temp)
6186 seq_printf(m, "%s%s: %d\n", prefix,
6187 binder_return_strings[i], temp);
6188 }
6189
6190 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6191 ARRAY_SIZE(binder_objstat_strings));
6192 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6193 ARRAY_SIZE(stats->obj_deleted));
6194 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6195 int created = atomic_read(&stats->obj_created[i]);
6196 int deleted = atomic_read(&stats->obj_deleted[i]);
6197
6198 if (created || deleted)
6199 seq_printf(m, "%s%s: active %d total %d\n",
6200 prefix,
6201 binder_objstat_strings[i],
6202 created - deleted,
6203 created);
6204 }
6205}
6206
6207static void print_binder_proc_stats(struct seq_file *m,
6208 struct binder_proc *proc)
6209{
6210 struct binder_work *w;
6211 struct binder_thread *thread;
6212 struct rb_node *n;
6213 int count, strong, weak, ready_threads;
6214 size_t free_async_space =
6215 binder_alloc_get_free_async_space(&proc->alloc);
6216
6217 seq_printf(m, "proc %d\n", proc->pid);
6218 seq_printf(m, "context %s\n", proc->context->name);
6219 count = 0;
6220 ready_threads = 0;
6221 binder_inner_proc_lock(proc);
6222 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6223 count++;
6224
6225 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6226 ready_threads++;
6227
6228 seq_printf(m, " threads: %d\n", count);
6229 seq_printf(m, " requested threads: %d+%d/%d\n"
6230 " ready threads %d\n"
6231 " free async space %zd\n", proc->requested_threads,
6232 proc->requested_threads_started, proc->max_threads,
6233 ready_threads,
6234 free_async_space);
6235 count = 0;
6236 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6237 count++;
6238 binder_inner_proc_unlock(proc);
6239 seq_printf(m, " nodes: %d\n", count);
6240 count = 0;
6241 strong = 0;
6242 weak = 0;
6243 binder_proc_lock(proc);
6244 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6245 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6246 rb_node_desc);
6247 count++;
6248 strong += ref->data.strong;
6249 weak += ref->data.weak;
6250 }
6251 binder_proc_unlock(proc);
6252 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6253
6254 count = binder_alloc_get_allocated_count(&proc->alloc);
6255 seq_printf(m, " buffers: %d\n", count);
6256
6257 binder_alloc_print_pages(m, &proc->alloc);
6258
6259 count = 0;
6260 binder_inner_proc_lock(proc);
6261 list_for_each_entry(w, &proc->todo, entry) {
6262 if (w->type == BINDER_WORK_TRANSACTION)
6263 count++;
6264 }
6265 binder_inner_proc_unlock(proc);
6266 seq_printf(m, " pending transactions: %d\n", count);
6267
6268 print_binder_stats(m, " ", &proc->stats);
6269}
6270
6271
David Brazdil0f672f62019-12-10 10:32:29 +00006272int binder_state_show(struct seq_file *m, void *unused)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006273{
6274 struct binder_proc *proc;
6275 struct binder_node *node;
6276 struct binder_node *last_node = NULL;
6277
6278 seq_puts(m, "binder state:\n");
6279
6280 spin_lock(&binder_dead_nodes_lock);
6281 if (!hlist_empty(&binder_dead_nodes))
6282 seq_puts(m, "dead nodes:\n");
6283 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6284 /*
6285 * take a temporary reference on the node so it
6286 * survives and isn't removed from the list
6287 * while we print it.
6288 */
6289 node->tmp_refs++;
6290 spin_unlock(&binder_dead_nodes_lock);
6291 if (last_node)
6292 binder_put_node(last_node);
6293 binder_node_lock(node);
6294 print_binder_node_nilocked(m, node);
6295 binder_node_unlock(node);
6296 last_node = node;
6297 spin_lock(&binder_dead_nodes_lock);
6298 }
6299 spin_unlock(&binder_dead_nodes_lock);
6300 if (last_node)
6301 binder_put_node(last_node);
6302
6303 mutex_lock(&binder_procs_lock);
6304 hlist_for_each_entry(proc, &binder_procs, proc_node)
6305 print_binder_proc(m, proc, 1);
6306 mutex_unlock(&binder_procs_lock);
6307
6308 return 0;
6309}
6310
David Brazdil0f672f62019-12-10 10:32:29 +00006311int binder_stats_show(struct seq_file *m, void *unused)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006312{
6313 struct binder_proc *proc;
6314
6315 seq_puts(m, "binder stats:\n");
6316
6317 print_binder_stats(m, "", &binder_stats);
6318
6319 mutex_lock(&binder_procs_lock);
6320 hlist_for_each_entry(proc, &binder_procs, proc_node)
6321 print_binder_proc_stats(m, proc);
6322 mutex_unlock(&binder_procs_lock);
6323
6324 return 0;
6325}
6326
David Brazdil0f672f62019-12-10 10:32:29 +00006327int binder_transactions_show(struct seq_file *m, void *unused)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006328{
6329 struct binder_proc *proc;
6330
6331 seq_puts(m, "binder transactions:\n");
6332 mutex_lock(&binder_procs_lock);
6333 hlist_for_each_entry(proc, &binder_procs, proc_node)
6334 print_binder_proc(m, proc, 0);
6335 mutex_unlock(&binder_procs_lock);
6336
6337 return 0;
6338}
6339
David Brazdil0f672f62019-12-10 10:32:29 +00006340static int proc_show(struct seq_file *m, void *unused)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006341{
6342 struct binder_proc *itr;
6343 int pid = (unsigned long)m->private;
6344
6345 mutex_lock(&binder_procs_lock);
6346 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6347 if (itr->pid == pid) {
6348 seq_puts(m, "binder proc state:\n");
6349 print_binder_proc(m, itr, 1);
6350 }
6351 }
6352 mutex_unlock(&binder_procs_lock);
6353
6354 return 0;
6355}
6356
6357static void print_binder_transaction_log_entry(struct seq_file *m,
6358 struct binder_transaction_log_entry *e)
6359{
6360 int debug_id = READ_ONCE(e->debug_id_done);
6361 /*
6362 * read barrier to guarantee debug_id_done read before
6363 * we print the log values
6364 */
6365 smp_rmb();
6366 seq_printf(m,
6367 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6368 e->debug_id, (e->call_type == 2) ? "reply" :
6369 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6370 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6371 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6372 e->return_error, e->return_error_param,
6373 e->return_error_line);
6374 /*
6375 * read-barrier to guarantee read of debug_id_done after
6376 * done printing the fields of the entry
6377 */
6378 smp_rmb();
6379 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6380 "\n" : " (incomplete)\n");
6381}
6382
David Brazdil0f672f62019-12-10 10:32:29 +00006383int binder_transaction_log_show(struct seq_file *m, void *unused)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006384{
6385 struct binder_transaction_log *log = m->private;
6386 unsigned int log_cur = atomic_read(&log->cur);
6387 unsigned int count;
6388 unsigned int cur;
6389 int i;
6390
6391 count = log_cur + 1;
6392 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6393 0 : count % ARRAY_SIZE(log->entry);
6394 if (count > ARRAY_SIZE(log->entry) || log->full)
6395 count = ARRAY_SIZE(log->entry);
6396 for (i = 0; i < count; i++) {
6397 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6398
6399 print_binder_transaction_log_entry(m, &log->entry[index]);
6400 }
6401 return 0;
6402}
6403
David Brazdil0f672f62019-12-10 10:32:29 +00006404const struct file_operations binder_fops = {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006405 .owner = THIS_MODULE,
6406 .poll = binder_poll,
6407 .unlocked_ioctl = binder_ioctl,
Olivier Deprez157378f2022-04-04 15:47:50 +02006408 .compat_ioctl = compat_ptr_ioctl,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006409 .mmap = binder_mmap,
6410 .open = binder_open,
6411 .flush = binder_flush,
6412 .release = binder_release,
Olivier Deprez92d4c212022-12-06 15:05:30 +01006413 .may_pollfree = true,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006414};
6415
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006416static int __init init_binder_device(const char *name)
6417{
6418 int ret;
6419 struct binder_device *binder_device;
6420
6421 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6422 if (!binder_device)
6423 return -ENOMEM;
6424
6425 binder_device->miscdev.fops = &binder_fops;
6426 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6427 binder_device->miscdev.name = name;
6428
Olivier Deprez0e641232021-09-23 10:07:05 +02006429 refcount_set(&binder_device->ref, 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006430 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6431 binder_device->context.name = name;
6432 mutex_init(&binder_device->context.context_mgr_node_lock);
6433
6434 ret = misc_register(&binder_device->miscdev);
6435 if (ret < 0) {
6436 kfree(binder_device);
6437 return ret;
6438 }
6439
6440 hlist_add_head(&binder_device->hlist, &binder_devices);
6441
6442 return ret;
6443}
6444
6445static int __init binder_init(void)
6446{
6447 int ret;
David Brazdil0f672f62019-12-10 10:32:29 +00006448 char *device_name, *device_tmp;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006449 struct binder_device *device;
6450 struct hlist_node *tmp;
David Brazdil0f672f62019-12-10 10:32:29 +00006451 char *device_names = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006452
6453 ret = binder_alloc_shrinker_init();
6454 if (ret)
6455 return ret;
6456
6457 atomic_set(&binder_transaction_log.cur, ~0U);
6458 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6459
6460 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6461 if (binder_debugfs_dir_entry_root)
6462 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6463 binder_debugfs_dir_entry_root);
6464
6465 if (binder_debugfs_dir_entry_root) {
6466 debugfs_create_file("state",
6467 0444,
6468 binder_debugfs_dir_entry_root,
6469 NULL,
6470 &binder_state_fops);
6471 debugfs_create_file("stats",
6472 0444,
6473 binder_debugfs_dir_entry_root,
6474 NULL,
6475 &binder_stats_fops);
6476 debugfs_create_file("transactions",
6477 0444,
6478 binder_debugfs_dir_entry_root,
6479 NULL,
6480 &binder_transactions_fops);
6481 debugfs_create_file("transaction_log",
6482 0444,
6483 binder_debugfs_dir_entry_root,
6484 &binder_transaction_log,
6485 &binder_transaction_log_fops);
6486 debugfs_create_file("failed_transaction_log",
6487 0444,
6488 binder_debugfs_dir_entry_root,
6489 &binder_transaction_log_failed,
6490 &binder_transaction_log_fops);
6491 }
6492
David Brazdil0f672f62019-12-10 10:32:29 +00006493 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6494 strcmp(binder_devices_param, "") != 0) {
6495 /*
6496 * Copy the module_parameter string, because we don't want to
6497 * tokenize it in-place.
6498 */
6499 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6500 if (!device_names) {
6501 ret = -ENOMEM;
6502 goto err_alloc_device_names_failed;
6503 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006504
David Brazdil0f672f62019-12-10 10:32:29 +00006505 device_tmp = device_names;
6506 while ((device_name = strsep(&device_tmp, ","))) {
6507 ret = init_binder_device(device_name);
6508 if (ret)
6509 goto err_init_binder_device_failed;
6510 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006511 }
6512
David Brazdil0f672f62019-12-10 10:32:29 +00006513 ret = init_binderfs();
6514 if (ret)
6515 goto err_init_binder_device_failed;
6516
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006517 return ret;
6518
6519err_init_binder_device_failed:
6520 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6521 misc_deregister(&device->miscdev);
6522 hlist_del(&device->hlist);
6523 kfree(device);
6524 }
6525
6526 kfree(device_names);
6527
6528err_alloc_device_names_failed:
6529 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6530
6531 return ret;
6532}
6533
6534device_initcall(binder_init);
6535
6536#define CREATE_TRACE_POINTS
6537#include "binder_trace.h"
6538
6539MODULE_LICENSE("GPL v2");