/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021 Facebook
 */

#ifndef __MMAP_UNLOCK_WORK_H__
#define __MMAP_UNLOCK_WORK_H__
#include <linux/irq_work.h>

/* irq_work used to run mmap_read_unlock() outside of irq-disabled context */
struct mmap_unlock_irq_work {
	struct irq_work irq_work;
	struct mm_struct *mm;
};

DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
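
/*
 * For context, a sketch of the other half of this mechanism. The per-CPU
 * variable declared above must be defined in a .c file, together with an
 * irq_work handler that performs the deferred unlock. The sketch below is
 * illustrative rather than a verbatim copy of that definition; it assumes
 * mmap_read_unlock_non_owner() is the right unlock primitive, since the
 * irq_work may run in a context that does not own the lock:
 *
 *	static void do_mmap_read_unlock(struct irq_work *entry)
 *	{
 *		struct mmap_unlock_irq_work *work;
 *
 *		if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
 *			return;
 *
 *		work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
 *		mmap_read_unlock_non_owner(work->mm);
 *	}
 *
 *	DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work) = {
 *		.irq_work = IRQ_WORK_INIT_HARD(do_mmap_read_unlock),
 *	};
 */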

/*
 * We cannot call mmap_read_unlock() while irqs are disabled, because of
 * the risk of deadlocking on rq_lock. To look up a vma while irqs are
 * disabled, we instead run mmap_read_unlock() from irq_work, using a
 * per-CPU work item. If that work item is already busy with another
 * lookup, we give up and let the caller fall back.
 */
static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr)
{
	struct mmap_unlock_irq_work *work = NULL;
	bool irq_work_busy = false;

	if (irqs_disabled()) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			work = this_cpu_ptr(&mmap_unlock_work);
			if (irq_work_is_busy(&work->irq_work)) {
				/* cannot queue more up_read, fall back */
				irq_work_busy = true;
			}
		} else {
			/*
			 * PREEMPT_RT does not allow trylocking the mmap
			 * semaphore in irq-disabled context. Force the
			 * fallback code.
			 */
			irq_work_busy = true;
		}
	}

	*work_ptr = work;
	return irq_work_busy;
}

static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
{
	if (!work) {
		mmap_read_unlock(mm);
	} else {
		work->mm = mm;

		/* The lock will be released once we're out of interrupt
		 * context. Tell lockdep that we've released it now so
		 * it doesn't complain that we forgot to release it.
		 */
		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
		irq_work_queue(&work->irq_work);
	}
}
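
/*
 * Usage sketch (illustrative; the surrounding variables are not from this
 * file), loosely following how a bpf vma lookup path pairs these helpers:
 *
 *	struct mmap_unlock_irq_work *work = NULL;
 *	bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
 *
 *	if (irq_work_busy || !mmap_read_trylock(mm))
 *		return -EBUSY;
 *
 *	vma = find_vma(mm, addr);
 *	... inspect vma while the read lock is held ...
 *
 *	bpf_mmap_unlock_mm(work, mm);
 *
 * With irqs enabled, work stays NULL and bpf_mmap_unlock_mm() drops the
 * lock directly; with irqs disabled, the unlock is deferred to the
 * per-CPU irq_work.
 */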

#endif /* __MMAP_UNLOCK_WORK_H__ */

Source: linux/kernel/bpf/mmap_unlock_work.h