// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
 * the event happened. When inotify gets an event it will need to add that
 * event to the group notify queue. Since a single event might need to be on
 * multiple groups' notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue. This event_holder
 * has a pointer back to the original event. Since the majority of events are
 * going to end up on one, and only one, notification queue we embed one
 * event_holder into each event. This means we have a single allocation instead
 * of always needing two. If the embedded event_holder is already in use by
 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
 * allocated and used.
 */
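
/*
 * Note: the queue linkage used throughout this file is embedded directly in
 * the event. A rough sketch of the relevant part of struct fsnotify_event
 * (declared in include/linux/fsnotify_backend.h; the exact layout may differ
 * between kernel versions):
 *
 *	struct fsnotify_event {
 *		struct list_head list;	// node on a group's notification_list
 *	};
 *
 * Backend-specific events (e.g. inotify's, fanotify's) embed this structure
 * in their own event types, so queueing usually needs no extra allocation.
 */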

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

/**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
 * Called from fsnotify_move, which is inlined into filesystem modules.
 */
u32 fsnotify_get_cookie(void)
{
	return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
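
/*
 * Illustrative sketch (not part of this file): fsnotify_move() obtains one
 * cookie and attaches it to both halves of a rename, roughly:
 *
 *	u32 fs_cookie = fsnotify_get_cookie();
 *
 *	// report FS_MOVED_FROM against the old directory and FS_MOVED_TO
 *	// against the new directory, both carrying fs_cookie
 *
 * Userspace readers (e.g. inotify) use the matching cookie values to pair
 * the two events back into a single rename. The exact reporting helpers and
 * their signatures vary between kernel versions, so this is only a sketch.
 */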

void fsnotify_destroy_event(struct fsnotify_group *group,
			    struct fsnotify_event *event)
{
	/* Overflow events are per-group and we don't want to free them */
	if (!event || event == group->overflow_event)
		return;
	/*
	 * If the event is still queued, we have a problem... Do an unreliable
	 * lockless check first to avoid locking in the common case. The
	 * locking may be necessary for permission events which got removed
	 * from the list by a different CPU than the one freeing the event.
	 */
	if (!list_empty(&event->list)) {
		spin_lock(&group->notification_lock);
		WARN_ON(!list_empty(&event->list));
		spin_unlock(&group->notification_lock);
	}
	group->ops->free_event(group, event);
}

/*
 * Try to add an event to the notification queue.
 * The group can later pull this event off the queue to deal with.
 * The group can use the @merge hook to merge the event with a queued event.
 * The group can use the @insert hook to insert the event into hash table.
 * The function returns:
 * 0 if the event was added to a queue
 * 1 if the event was merged with some other queued event
 * 2 if the event was not queued - either the queue of events has overflowed
 *   or the group is shutting down.
 */
int fsnotify_insert_event(struct fsnotify_group *group,
			  struct fsnotify_event *event,
			  int (*merge)(struct fsnotify_group *,
				       struct fsnotify_event *),
			  void (*insert)(struct fsnotify_group *,
					 struct fsnotify_event *))
{
	int ret = 0;
	struct list_head *list = &group->notification_list;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	spin_lock(&group->notification_lock);

	if (group->shutdown) {
		spin_unlock(&group->notification_lock);
		return 2;
	}

	if (event == group->overflow_event ||
	    group->q_len >= group->max_events) {
		ret = 2;
		/* Queue overflow event only if it isn't already queued */
		if (!list_empty(&group->overflow_event->list)) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
		event = group->overflow_event;
		goto queue;
	}

	if (!list_empty(list) && merge) {
		ret = merge(group, event);
		if (ret) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
	}

queue:
	group->q_len++;
	list_add_tail(&event->list, list);
	if (insert)
		insert(group, event);
	spin_unlock(&group->notification_lock);

	wake_up(&group->notification_waitq);
	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
	return ret;
}
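
/*
 * Illustrative sketch (not part of this file): a backend typically queues a
 * freshly allocated event and frees it itself when the event was merged or
 * dropped. Roughly, with group_merge() standing in for a backend's merge
 * callback:
 *
 *	ret = fsnotify_insert_event(group, event, group_merge, NULL);
 *	if (ret)	// 1: merged into a queued event, 2: overflow/shutdown
 *		fsnotify_destroy_event(group, event);
 *
 * inotify, for example, follows this pattern through the fsnotify_add_event()
 * wrapper.
 */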

void fsnotify_remove_queued_event(struct fsnotify_group *group,
				  struct fsnotify_event *event)
{
	assert_spin_locked(&group->notification_lock);
	/*
	 * We need to init the list head for the case of an overflow event so
	 * that the overflow-queued check in fsnotify_insert_event() works.
	 */
	list_del_init(&event->list);
	group->q_len--;
}

/*
 * Return the first event on the notification list without removing it.
 * Returns NULL if the list is empty.
 */
struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	return list_first_entry(&group->notification_list,
				struct fsnotify_event, list);
}

/*
 * Remove and return the first event from the notification list. It is the
 * responsibility of the caller to destroy the obtained event.
 */
struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
{
	struct fsnotify_event *event = fsnotify_peek_first_event(group);

	if (!event)
		return NULL;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	fsnotify_remove_queued_event(group, event);

	return event;
}
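
/*
 * Illustrative sketch (not part of this file): a reader (e.g. inotify's
 * read() path) drains the queue under notification_lock and destroys each
 * event once it has been copied out, roughly:
 *
 *	spin_lock(&group->notification_lock);
 *	event = fsnotify_remove_first_event(group);
 *	spin_unlock(&group->notification_lock);
 *	if (event) {
 *		// ... copy the event to the user buffer ...
 *		fsnotify_destroy_event(group, event);
 *	}
 *
 * fsnotify_peek_first_event() lets a reader check the next event (e.g. its
 * size) before deciding to dequeue it.
 */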

/*
 * Called when a group is being torn down to clean up any outstanding
 * event notifications.
 */
void fsnotify_flush_notify(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	spin_lock(&group->notification_lock);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_first_event(group);
		spin_unlock(&group->notification_lock);
		fsnotify_destroy_event(group, event);
		spin_lock(&group->notification_lock);
	}
	spin_unlock(&group->notification_lock);
}