/*
 * Cleancache frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of cleancache. See
 * Documentation/vm/cleancache.rst for more information.
 *
 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/cleancache.h>

/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static const struct cleancache_ops *cleancache_ops __read_mostly;

/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured). These are for information only, so they are not
 * protected against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;

static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
{
	switch (sb->cleancache_poolid) {
	case CLEANCACHE_NO_BACKEND:
		__cleancache_init_fs(sb);
		break;
	case CLEANCACHE_NO_BACKEND_SHARED:
		__cleancache_init_shared_fs(sb);
		break;
	}
}

/*
 * Register operations for cleancache. Returns 0 on success.
 */
int cleancache_register_ops(const struct cleancache_ops *ops)
{
	if (cmpxchg(&cleancache_ops, NULL, ops))
		return -EBUSY;

	/*
	 * A cleancache backend can be built as a module and hence loaded after
	 * a cleancache enabled filesystem has called cleancache_init_fs. To
	 * handle such a scenario, here we call ->init_fs or ->init_shared_fs
	 * for each active super block. To differentiate between local and
	 * shared filesystems, we temporarily initialize sb->cleancache_poolid
	 * to CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED
	 * respectively in case there is no backend registered at the time
	 * cleancache_init_fs or cleancache_init_shared_fs is called.
	 *
	 * Since filesystems can be mounted concurrently with cleancache
	 * backend registration, we have to be careful to guarantee that all
	 * cleancache enabled filesystems that have been mounted by the time
	 * cleancache_register_ops is called have got, and all mounted later
	 * will get, a cleancache_poolid. This is assured by the following
	 * statements tied together:
	 *
	 * a) iterate_supers skips only those super blocks that have started
	 *    ->kill_sb
	 *
	 * b) if iterate_supers encounters a super block that has not finished
	 *    ->mount yet, it waits until it is finished
	 *
	 * c) cleancache_init_fs is called from ->mount and
	 *    cleancache_invalidate_fs is called from ->kill_sb
	 *
	 * d) we call iterate_supers after cleancache_ops has been set
	 *
	 * From a) it follows that if iterate_supers skips a super block, then
	 * either the super block is already dead, in which case we do not need
	 * to bother initializing cleancache for it, or it was mounted after we
	 * initiated iterate_supers. In the latter case, it must have seen
	 * cleancache_ops set according to d) and initialized cleancache from
	 * ->mount by itself according to c). This proves that we call
	 * ->init_fs at least once for each active super block.
	 *
	 * From b) and c) it follows that if iterate_supers encounters a super
	 * block that has already started ->init_fs, it will wait until ->mount
	 * and hence ->init_fs has finished, then check cleancache_poolid, see
	 * that it has already been set and therefore do nothing. This proves
	 * that we call ->init_fs no more than once for each super block.
	 *
	 * Combined together, the last two paragraphs prove the correctness of
	 * this function.
	 *
	 * Note that various cleancache callbacks may proceed before this
	 * function is called or even concurrently with it, but since
	 * CLEANCACHE_NO_BACKEND is negative, they will all result in a noop
	 * until the corresponding ->init_fs has been actually called and
	 * cleancache_ops has been set.
	 */
	iterate_supers(cleancache_register_ops_sb, NULL);
	return 0;
}
EXPORT_SYMBOL(cleancache_register_ops);
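
/*
 * Illustrative sketch (not part of this file): a backend, possibly built
 * as a module, would typically define its implementation table once and
 * register it at load time. All "my_*" names below are hypothetical.
 *
 *	static const struct cleancache_ops my_cleancache_ops = {
 *		.init_fs		= my_init_fs,
 *		.init_shared_fs		= my_init_shared_fs,
 *		.get_page		= my_get_page,
 *		.put_page		= my_put_page,
 *		.invalidate_page	= my_invalidate_page,
 *		.invalidate_inode	= my_invalidate_inode,
 *		.invalidate_fs		= my_invalidate_fs,
 *	};
 *
 *	static int __init my_backend_init(void)
 *	{
 *		return cleancache_register_ops(&my_cleancache_ops);
 *	}
 */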

/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
	int pool_id = CLEANCACHE_NO_BACKEND;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_fs(PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_fs);

/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(struct super_block *sb)
{
	int pool_id = CLEANCACHE_NO_BACKEND_SHARED;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);
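
/*
 * Illustrative sketch (not part of this file): a filesystem opts in to
 * cleancache by calling the wrappers from <linux/cleancache.h> near the
 * end of its mount path, e.g. from its ->fill_super implementation:
 *
 *	cleancache_init_fs(sb);
 *
 * or, for a clustered filesystem sharing the pool across nodes:
 *
 *	cleancache_init_shared_fs(sb);
 */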

/*
 * If the filesystem uses exportable filehandles, use the filehandle as
 * the key, else use the inode number.
 */
static int cleancache_get_key(struct inode *inode,
			      struct cleancache_filekey *key)
{
	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
	struct super_block *sb = inode->i_sb;

	key->u.ino = inode->i_ino;
	if (sb->s_export_op != NULL) {
		fhfn = sb->s_export_op->encode_fh;
		if (fhfn) {
			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
			if (len <= FILEID_ROOT || len == FILEID_INVALID)
				return -1;
			if (maxlen > CLEANCACHE_KEY_MAX)
				return -1;
		}
	}
	return 0;
}
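
/*
 * Illustrative sketch (not part of this file): a backend treats the key
 * as an opaque blob, so a hypothetical backend lookup might simply hash
 * all CLEANCACHE_KEY_MAX words, regardless of which union member the
 * frontend filled in:
 *
 *	u32 hash = 0;
 *	int i;
 *
 *	for (i = 0; i < CLEANCACHE_KEY_MAX; i++)
 *		hash = hash * 31 + key.u.key[i];
 */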

/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * The page frame is left unchanged and -1 is returned if the get fails.
 * Page must be locked by caller.
 *
 * The function performs two checks before taking any action: whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is valid.
 */
int __cleancache_get_page(struct page *page)
{
	int ret = -1;
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_failed_gets++;
		goto out;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id < 0)
		goto out;

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;

	ret = cleancache_ops->get_page(pool_id, key, page->index, page);
	if (ret == 0)
		cleancache_succ_gets++;
	else
		cleancache_failed_gets++;
out:
	return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
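
/*
 * Illustrative sketch of the caller side: a read path may consult
 * cleancache via the cleancache_get_page() wrapper (which also checks
 * that a backend is enabled) before issuing real block I/O, roughly as
 * fs/mpage.c does. If the get succeeds, the page was filled without any
 * disk I/O:
 *
 *	if (cleancache_get_page(page) == 0) {
 *		SetPageUptodate(page);
 *		goto done;
 *	}
 */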

/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's inode
 * and page index. Page must be locked. Note that a put_page always
 * "succeeds", though a subsequent get_page may succeed or fail.
 *
 * The function performs two checks before taking any action: whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is valid.
 */
void __cleancache_put_page(struct page *page)
{
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_puts++;
		return;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id >= 0 &&
	    cleancache_get_key(page->mapping->host, &key) >= 0) {
		cleancache_ops->put_page(pool_id, key, page->index, page);
		cleancache_puts++;
	}
}
EXPORT_SYMBOL(__cleancache_put_page);
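
/*
 * Illustrative sketch of the caller side: the VFS offers a page to
 * cleancache when a clean, up-to-date page is removed from the page
 * cache, and invalidates any stale copy otherwise, roughly as
 * mm/filemap.c does on page deletion:
 *
 *	if (PageUptodate(page) && PageMappedToDisk(page))
 *		cleancache_put_page(page);
 *	else
 *		cleancache_invalidate_page(mapping, page);
 */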

/*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 *
 * The function performs two checks before taking any action: whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is valid.
 */
void __cleancache_invalidate_page(struct address_space *mapping,
				  struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (pool_id >= 0) {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			cleancache_ops->invalidate_page(pool_id,
							key, page->index);
			cleancache_invalidates++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_invalidate_page);

/*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 *
 * The function performs two checks before taking any action: whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is valid.
 */
void __cleancache_invalidate_inode(struct address_space *mapping)
{
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		cleancache_ops->invalidate_inode(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);
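
/*
 * Illustrative sketch of the caller side: truncation drops the whole
 * inode from cleancache so stale data cannot be fetched later, roughly
 * as mm/truncate.c does:
 *
 *	cleancache_invalidate_inode(mapping);
 *
 * after which the inode's pages are removed from the page cache.
 */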

/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_invalidate_fs(struct super_block *sb)
{
	int pool_id;

	pool_id = sb->cleancache_poolid;
	sb->cleancache_poolid = CLEANCACHE_NO_POOL;

	if (cleancache_ops && pool_id >= 0)
		cleancache_ops->invalidate_fs(pool_id);
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);
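
/*
 * Illustrative sketch of the caller side: the cleancache_invalidate_fs()
 * wrapper is invoked from the generic unmount path (->kill_sb), roughly
 * as fs/super.c does when shutting a super block down:
 *
 *	cleancache_invalidate_fs(sb);
 */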

static int __init init_cleancache(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("cleancache", NULL);

	debugfs_create_u64("succ_gets", 0444, root, &cleancache_succ_gets);
	debugfs_create_u64("failed_gets", 0444, root, &cleancache_failed_gets);
	debugfs_create_u64("puts", 0444, root, &cleancache_puts);
	debugfs_create_u64("invalidates", 0444, root, &cleancache_invalidates);
#endif
	return 0;
}
module_init(init_cleancache)
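
/*
 * The counters exported above can be inspected at runtime, e.g.
 * (assuming debugfs is mounted in the usual place):
 *
 *	# cat /sys/kernel/debug/cleancache/succ_gets
 */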
318