// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache cache handling
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/export.h>
#include <linux/slab.h>
#include "internal.h"

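/*
 * List of all known caches.  Addition to and removal from the list are
 * serialised by fscache_addremove_sem.  fscache_clearance_waiters is woken
 * when a withdrawn cache's object count drops to zero so that the backend can
 * wait for all of its objects to be discarded.
 */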
static LIST_HEAD(fscache_caches);
DECLARE_RWSEM(fscache_addremove_sem);
EXPORT_SYMBOL(fscache_addremove_sem);
DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
EXPORT_SYMBOL(fscache_clearance_waiters);

static atomic_t fscache_cache_debug_id;

/*
 * Allocate a cache cookie.
 */
static struct fscache_cache *fscache_alloc_cache(const char *name)
{
	struct fscache_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache) {
		if (name) {
			cache->name = kstrdup(name, GFP_KERNEL);
			if (!cache->name) {
				kfree(cache);
				return NULL;
			}
		}
		refcount_set(&cache->ref, 1);
		INIT_LIST_HEAD(&cache->cache_link);
		cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
	}
	return cache;
}

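/*
 * Get a reference on a cache cookie, but only if its refcount hasn't already
 * dropped to zero (i.e. it isn't already being destroyed).
 */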
static bool fscache_get_cache_maybe(struct fscache_cache *cache,
				    enum fscache_cache_trace where)
{
	bool success;
	int ref;

	success = __refcount_inc_not_zero(&cache->ref, &ref);
	if (success)
		trace_fscache_cache(cache->debug_id, ref + 1, where);
	return success;
}

/*
 * Look up a cache cookie.
 */
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
{
	struct fscache_cache *candidate, *cache, *unnamed = NULL;

	/* firstly check for the existence of the cache under read lock */
	down_read(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
		if (!cache->name && !name &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
	}

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_r;
		}
	}

	up_read(&fscache_addremove_sem);

	/* the cache does not exist - create a candidate */
	candidate = fscache_alloc_cache(name);
	if (!candidate)
		return ERR_PTR(-ENOMEM);

	/* write lock, search again and add if still not present */
	down_write(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_w;
		if (!cache->name) {
			unnamed = cache;
			if (!name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	if (unnamed && is_cache &&
	    fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
		goto use_unnamed_cache;

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	list_add_tail(&candidate->cache_link, &fscache_caches);
	trace_fscache_cache(candidate->debug_id,
			    refcount_read(&candidate->ref),
			    fscache_cache_new_acquire);
	up_write(&fscache_addremove_sem);
	return candidate;

got_cache_r:
	up_read(&fscache_addremove_sem);
	return cache;
use_unnamed_cache:
	cache = unnamed;
	cache->name = candidate->name;
	candidate->name = NULL;
got_cache_w:
	up_write(&fscache_addremove_sem);
	kfree(candidate->name);
	kfree(candidate);
	return cache;
}

/**
 * fscache_acquire_cache - Acquire a cache-level cookie.
 * @name: The name of the cache.
 *
 * Get a cookie to represent an actual cache. If a name is given and there is
 * a nameless cache record available, this will acquire that and set its name,
 * directing all the volumes using it to this cache.
 *
 * The cache will be switched over to the preparing state if not currently in
 * use, otherwise -EBUSY will be returned.
 */
struct fscache_cache *fscache_acquire_cache(const char *name)
{
	struct fscache_cache *cache;

	ASSERT(name);
	cache = fscache_lookup_cache(name, true);
	if (IS_ERR(cache))
		return cache;

	if (!fscache_set_cache_state_maybe(cache,
					   FSCACHE_CACHE_IS_NOT_PRESENT,
					   FSCACHE_CACHE_IS_PREPARING)) {
		pr_warn("Cache tag %s in use\n", name);
		fscache_put_cache(cache, fscache_cache_put_cache);
		return ERR_PTR(-EBUSY);
	}

	return cache;
}
EXPORT_SYMBOL(fscache_acquire_cache);
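
/* Example: a cache backend (e.g. cachefiles) acquires its cache-level cookie
 * by name when it is bound to a backing store, roughly as follows (the name
 * "mycache" is illustrative):
 *
 *	struct fscache_cache *cache;
 *
 *	cache = fscache_acquire_cache("mycache");
 *	if (IS_ERR(cache))
 *		return PTR_ERR(cache);
 *	// the backend then probes its storage and calls fscache_add_cache()
 */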

/**
 * fscache_put_cache - Release a cache-level cookie.
 * @cache: The cache cookie to be released
 * @where: An indication of where the release happened
 *
 * Release the caller's reference on a cache-level cookie. The @where
 * indication should give information about the circumstances in which the call
 * occurs and will be logged through a tracepoint.
 */
void fscache_put_cache(struct fscache_cache *cache,
		       enum fscache_cache_trace where)
{
	unsigned int debug_id;
	bool zero;
	int ref;

	if (IS_ERR_OR_NULL(cache))
		return;

	debug_id = cache->debug_id;
	zero = __refcount_dec_and_test(&cache->ref, &ref);
	trace_fscache_cache(debug_id, ref - 1, where);

	if (zero) {
		down_write(&fscache_addremove_sem);
		list_del_init(&cache->cache_link);
		up_write(&fscache_addremove_sem);
		kfree(cache->name);
		kfree(cache);
	}
}

/**
 * fscache_relinquish_cache - Reset cache state and release cookie
 * @cache: The cache cookie to be released
 *
 * Reset the state of a cache and release the caller's reference on a cache
 * cookie.
 */
void fscache_relinquish_cache(struct fscache_cache *cache)
{
	enum fscache_cache_trace where =
		(cache->state == FSCACHE_CACHE_IS_PREPARING) ?
		fscache_cache_put_prep_failed :
		fscache_cache_put_relinquish;

	cache->ops = NULL;
	cache->cache_priv = NULL;
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
	fscache_put_cache(cache, where);
}
EXPORT_SYMBOL(fscache_relinquish_cache);
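
/* Usage note: a backend calls this both when preparation of a newly acquired
 * cache fails (the cookie is still in the preparing state, so the put is
 * traced as fscache_cache_put_prep_failed) and when a previously active cache
 * has been withdrawn and is being torn down.
 */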

/**
 * fscache_add_cache - Declare a cache as being open for business
 * @cache: The cache-level cookie representing the cache
 * @ops: Table of cache operations to use
 * @cache_priv: Private data for the cache record
 *
 * Add a cache to the system, making it available for netfs's to use.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
int fscache_add_cache(struct fscache_cache *cache,
		      const struct fscache_cache_ops *ops,
		      void *cache_priv)
{
	int n_accesses;

	_enter("{%s,%s}", ops->name, cache->name);

	BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);

	/* Get a ref on the cache cookie and keep its n_accesses counter raised
	 * by 1 to prevent wakeups from transitioning it to 0 until we're
	 * withdrawing caching services from it.
	 */
	n_accesses = atomic_inc_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_pin);

	down_write(&fscache_addremove_sem);

	cache->ops = ops;
	cache->cache_priv = cache_priv;
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);

	up_write(&fscache_addremove_sem);
	pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
	_leave(" = 0 [%s]", cache->name);
	return 0;
}
EXPORT_SYMBOL(fscache_add_cache);
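
/* Example: a backend brings its cache online by passing in an operations
 * table and private state once the backing store is ready (the identifiers
 * below are illustrative, not defined in this file):
 *
 *	static const struct fscache_cache_ops my_cache_ops = {
 *		.name = "mybackend",
 *		// ... method pointers ...
 *	};
 *
 *	ret = fscache_add_cache(cache, &my_cache_ops, my_cache_priv);
 *	if (ret < 0)
 *		goto error;
 */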

/**
 * fscache_begin_cache_access - Pin a cache so it can be accessed
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Attempt to pin the cache to prevent it from going away whilst we're
 * accessing it, returning true if successful. This works as follows:
 *
 * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
 *     then we return false to indicate access was not permitted.
 *
 * (2) If the cache tests as live, then we increment the n_accesses count and
 *     then recheck the liveness, ending the access if it ceased to be live.
 *
 * (3) When we end the access, we decrement n_accesses and wake up any
 *     waiters if it reaches 0.
 *
 * (4) Whilst the cache is caching, n_accesses is kept artificially
 *     incremented to prevent wakeups from happening.
 *
 * (5) When the cache is taken offline, the state is changed to prevent new
 *     accesses, n_accesses is decremented and we wait for n_accesses to
 *     become 0.
 */
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	if (!fscache_cache_is_live(cache))
		return false;

	n_accesses = atomic_inc_return(&cache->n_accesses);
	smp_mb__after_atomic(); /* Reread live flag after n_accesses */
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (!fscache_cache_is_live(cache)) {
		fscache_end_cache_access(cache, fscache_access_unlive);
		return false;
	}
	return true;
}
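
/* Example: users of the cache bracket each access with a begin/end pair so
 * that withdrawal can wait for outstanding accesses to drain (sketch only;
 * the trace reasons shown are illustrative):
 *
 *	if (!fscache_begin_cache_access(cache, fscache_access_lookup_cookie))
 *		return -ENOBUFS;	// cache not live
 *	// ... talk to the cache ...
 *	fscache_end_cache_access(cache, fscache_access_lookup_cookie_end);
 */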

/**
 * fscache_end_cache_access - Unpin a cache at the end of an access.
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache after we've accessed it. The @why indicator is merely
 * provided for tracing purposes.
 */
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	smp_mb__before_atomic();
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (n_accesses == 0)
		wake_up_var(&cache->n_accesses);
}

/**
 * fscache_io_error - Note a cache I/O error
 * @cache: The record describing the cache
 *
 * Note that an I/O error occurred in a cache and that it should no longer be
 * used for anything. This also reports the error into the kernel log.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
void fscache_io_error(struct fscache_cache *cache)
{
	if (fscache_set_cache_state_maybe(cache,
					  FSCACHE_CACHE_IS_ACTIVE,
					  FSCACHE_CACHE_GOT_IOERROR))
		pr_err("Cache '%s' stopped due to I/O error\n",
		       cache->name);
}
EXPORT_SYMBOL(fscache_io_error);
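
/* Example: a backend reports a fatal error on its backing store so that no
 * further caching is attempted (a sketch of typical error handling; the
 * backing-store operation shown is illustrative):
 *
 *	ret = vfs_fsync(file, 0);
 *	if (ret < 0)
 *		fscache_io_error(cache);
 */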

/**
 * fscache_withdraw_cache - Withdraw a cache from the active service
 * @cache: The cache cookie
 *
 * Begin the process of withdrawing a cache from service. This stops new
 * cache-level and volume-level accesses from taking place and waits for
 * currently ongoing cache-level accesses to end.
 */
void fscache_withdraw_cache(struct fscache_cache *cache)
{
	int n_accesses;

	pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
		  cache->name, atomic_read(&cache->object_count));

	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);

	/* Allow wakeups on dec-to-0 */
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_unpin);

	wait_var_event(&cache->n_accesses,
		       atomic_read(&cache->n_accesses) == 0);
}
EXPORT_SYMBOL(fscache_withdraw_cache);
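
/* Teardown sketch: a backend going offline typically withdraws the cache,
 * waits for its remaining objects to be cleared and then drops the cookie
 * (the ordering below is illustrative of typical backend usage):
 *
 *	fscache_withdraw_cache(cache);
 *	wait_event(fscache_clearance_waiters,
 *		   atomic_read(&cache->object_count) == 0);
 *	fscache_relinquish_cache(cache);
 */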

#ifdef CONFIG_PROC_FS
static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";

/*
 * Generate a list of caches in /proc/fs/fscache/caches
 */
static int fscache_caches_seq_show(struct seq_file *m, void *v)
{
	struct fscache_cache *cache;

	if (v == &fscache_caches) {
		seq_puts(m,
			 "CACHE    REF   VOLS  OBJS  ACCES S NAME\n"
			 "======== ===== ===== ===== ===== = ===============\n"
			 );
		return 0;
	}

	cache = list_entry(v, struct fscache_cache, cache_link);
	seq_printf(m,
		   "%08x %5d %5d %5d %5d %c %s\n",
		   cache->debug_id,
		   refcount_read(&cache->ref),
		   atomic_read(&cache->n_volumes),
		   atomic_read(&cache->object_count),
		   atomic_read(&cache->n_accesses),
		   fscache_cache_states[cache->state],
		   cache->name ?: "-");
	return 0;
}
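
/* Example of the resulting /proc/fs/fscache/caches output (the values shown
 * are illustrative):
 *
 *	CACHE    REF   VOLS  OBJS  ACCES S NAME
 *	======== ===== ===== ===== ===== = ===============
 *	00000001     3     1    10     1 A mycache
 */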

static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(fscache_addremove_sem)
{
	down_read(&fscache_addremove_sem);
	return seq_list_start_head(&fscache_caches, *_pos);
}

static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	return seq_list_next(v, &fscache_caches, _pos);
}

static void fscache_caches_seq_stop(struct seq_file *m, void *v)
	__releases(fscache_addremove_sem)
{
	up_read(&fscache_addremove_sem);
}

const struct seq_operations fscache_caches_seq_ops = {
	.start	= fscache_caches_seq_start,
	.next	= fscache_caches_seq_next,
	.stop	= fscache_caches_seq_stop,
	.show	= fscache_caches_seq_show,
};
#endif /* CONFIG_PROC_FS */