// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct kmem_cache *drc_slab;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:	  8192
 * 128MB:	 11585
 * 256MB:	 16384
 * 512MB:	 23170
 *   1GB:	 32768
 *   2GB:	 46340
 *   4GB:	 65536
 *   8GB:	 92681
 *  16GB:	131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
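 *
 * Worked example, assuming PAGE_SHIFT == 12: with 1GB of low memory,
 * low_pages == 262144, int_sqrt(262144) == 512, and
 * (16 * 512) << (12 - 10) == 32768 entries, matching the 1GB row above.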
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

static struct nfsd_cacherep *
nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
		    struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr,
			      svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr,
			     rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}

static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	kmem_cache_free(drc_slab, rp);
}

static unsigned long
nfsd_cacherep_dispose(struct list_head *dispose)
{
	struct nfsd_cacherep *rp;
	unsigned long freed = 0;

	while (!list_empty(dispose)) {
		rp = list_first_entry(dispose, struct nfsd_cacherep, c_lru);
		list_del(&rp->c_lru);
		nfsd_cacherep_free(rp);
		freed++;
	}
	return freed;
}

static void
nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			    struct nfsd_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&nn->num_drc_entries);
		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
	}
}

static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
			     struct nfsd_net *nn)
{
	nfsd_cacherep_unlink_locked(nn, b, rp);
	nfsd_cacherep_free(rp);
}

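/*
 * Variant of nfsd_reply_cache_free_locked() for callers that do not
 * yet hold the bucket lock: unlink under the lock, but do the actual
 * free outside it to keep the critical section short.
 */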
static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
		      struct nfsd_net *nn)
{
	spin_lock(&b->cache_lock);
	nfsd_cacherep_unlink_locked(nn, b, rp);
	spin_unlock(&b->cache_lock);
	nfsd_cacherep_free(rp);
}

int nfsd_drc_slab_create(void)
{
	drc_slab = KMEM_CACHE(nfsd_cacherep, 0);
	return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
	kmem_cache_destroy(drc_slab);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
	unsigned int hashsize;
	unsigned int i;

	nn->max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&nn->num_drc_entries, 0);
	hashsize = nfsd_hashsize(nn->max_drc_entries);
	nn->maskbits = ilog2(hashsize);

	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
				   sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
	if (!nn->drc_hashtbl)
		return -ENOMEM;

	nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
						       nn->nfsd_name);
	if (!nn->nfsd_reply_cache_shrinker)
		goto out_shrinker;

	nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker->seeks = 1;
	nn->nfsd_reply_cache_shrinker->private_data = nn;

	shrinker_register(nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
	}
	nn->drc_hashsize = hashsize;

	return 0;
out_shrinker:
	kvfree(nn->drc_hashtbl);
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp;
	unsigned int i;

	shrinker_free(nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct list_head *head = &nn->drc_hashtbl[i].lru_head;

		while (!list_empty(head)) {
			rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
						     rp, nn);
		}
	}

	kvfree(nn->drc_hashtbl);
	nn->drc_hashtbl = NULL;
	nn->drc_hashsize = 0;
}

/*
 * Move a cache entry to the end of its bucket's LRU list. (Expired
 * entries are reaped by the shrinker and by nfsd_prune_bucket_locked();
 * there is no separately scheduled cleaner.)
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

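/*
 * Only the XID is hashed, so every retransmission of a given Call lands
 * in the same bucket, whatever the other key fields contain.
 */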
static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{
	unsigned int hash = hash_32((__force u32)xid, nn->maskbits);

	return &nn->drc_hashtbl[hash];
}

/*
 * Remove expired entries from bucket @b and collect them on @dispose.
 * If @max is non-zero, stop scanning once more than @max entries have
 * been removed; if @max is zero, do not limit the number of removed
 * entries.
 */
static void
nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			 unsigned int max, struct list_head *dispose)
{
	unsigned long expiry = jiffies - RC_EXPIRE;
	struct nfsd_cacherep *rp, *tmp;
	unsigned int freed = 0;

	lockdep_assert_held(&b->cache_lock);

	/* The bucket LRU is ordered oldest-first. */
	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;

		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
		    time_before(expiry, rp->c_timestamp))
			break;

		nfsd_cacherep_unlink_locked(nn, b, rp);
		list_add(&rp->c_lru, dispose);

		if (max && ++freed > max)
			break;
	}
}

/**
 * nfsd_reply_cache_count - count_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Returns the total number of entries in the duplicate reply cache. To
 * keep things simple and quick, this is not the number of expired entries
 * in the cache (ie, the number that would be removed by a call to
 * nfsd_reply_cache_scan).
 */
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = shrink->private_data;

	return atomic_read(&nn->num_drc_entries);
}

/**
 * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Free expired entries on each bucket's LRU list until we've released
 * nr_to_scan freed objects. Nothing will be released if the cache
 * has not exceeded its max_drc_entries limit.
 *
 * Returns the number of entries released by this call.
 */
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = shrink->private_data;
	unsigned long freed = 0;
	LIST_HEAD(dispose);
	unsigned int i;

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;

		spin_lock(&b->cache_lock);
		nfsd_prune_bucket_locked(nn, b, 0, &dispose);
		spin_unlock(&b->cache_lock);

		freed += nfsd_cacherep_dispose(&dispose);
		if (freed > sc->nr_to_scan)
			break;
	}
	return freed;
}

/**
 * nfsd_cache_csum - Checksum incoming NFS Call arguments
 * @buf: buffer containing a whole RPC Call message
 * @start: starting byte of the NFS Call header
 * @remaining: size of the NFS Call header, in bytes
 *
 * Compute a weak checksum of the leading bytes of an NFS procedure
 * call header to help verify that a retransmitted Call matches an
 * entry in the duplicate reply cache.
 *
 * To avoid assumptions about how the RPC message is laid out in
 * @buf and what else it might contain (eg, a GSS MIC suffix), the
 * caller passes us the exact location and length of the NFS Call
 * header.
 *
 * Returns a 32-bit checksum value, as defined in RFC 793.
 */
static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
			      unsigned int remaining)
{
	unsigned int base, len;
	struct xdr_buf subbuf;
	__wsum csum = 0;
	void *p;
	int idx;

	if (remaining > RC_CSUMLEN)
		remaining = RC_CSUMLEN;
	if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
		return csum;

	/* rq_arg.head first */
	if (subbuf.head[0].iov_len) {
		len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
		csum = csum_partial(subbuf.head[0].iov_base, len, csum);
		remaining -= len;
	}

	/* Continue into page array */
	idx = subbuf.page_base / PAGE_SIZE;
	base = subbuf.page_base & ~PAGE_MASK;
	while (remaining) {
		p = page_address(subbuf.pages[idx]) + base;
		len = min_t(unsigned int, PAGE_SIZE - base, remaining);
		csum = csum_partial(p, len, csum);
		remaining -= len;
		base = 0;
		++idx;
	}
	return csum;
}

static int
nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
		   const struct nfsd_cacherep *rp, struct nfsd_net *nn)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum) {
		nfsd_stats_payload_misses_inc(nn);
		trace_nfsd_drc_mismatch(nn, key, rp);
	}

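	/*
	 * The struct-wide memcmp only needs to give the rbtree a stable
	 * total order; no single field of c_key is significant on its own.
	 */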
	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the matching entry if
 * one is found, otherwise inserts @key and returns it.
 */
static struct nfsd_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
		  struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp, *ret = key;
	struct rb_node **p = &b->rb_head.rb_node,
		       *parent = NULL;
	unsigned int entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct nfsd_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp, nn);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > nn->longest_chain) {
		nn->longest_chain = entries;
		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
	} else if (entries == nn->longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		nn->longest_chain_cachesize = min_t(unsigned int,
				nn->longest_chain_cachesize,
				atomic_read(&nn->num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 * @start: starting byte in @rqstp->rq_arg of the NFS Call header
 * @len: size of the NFS Call header, in bytes
 * @cacherep: OUT: DRC entry for this request
 *
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a cache miss followed by an insert, an entry is
 * preallocated before the bucket lock is taken; if a matching entry
 * turns out to exist already, the preallocated one is discarded.
 *
 * Return values:
 * %RC_DOIT: Process the request normally
 * %RC_REPLY: Reply from cache
 * %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
		      unsigned int len, struct nfsd_cacherep **cacherep)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfsd_cacherep *rp, *found;
	__wsum csum;
	struct nfsd_drc_bucket *b;
	int type = rqstp->rq_cachetype;
	LIST_HEAD(dispose);
	int rtn = RC_DOIT;

	if (type == RC_NOCACHE) {
		nfsd_stats_rc_nocache_inc(nn);
		goto out;
	}

	csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_cacherep_alloc(rqstp, csum, nn);
	if (!rp)
		goto out;

	b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp)
		goto found_entry;
	*cacherep = rp;
	rp->c_state = RC_INPROG;
	nfsd_prune_bucket_locked(nn, b, 3, &dispose);
	spin_unlock(&b->cache_lock);

	nfsd_cacherep_dispose(&dispose);

	nfsd_stats_rc_misses_inc(nn);
	atomic_inc(&nn->num_drc_entries);
	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
	goto out;

found_entry:
	/* We found a matching entry which is either in progress or done. */
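	/*
	 * Our preallocated entry was never inserted (it is still
	 * RC_UNUSED), so nfsd_cacherep_unlink_locked() will not touch
	 * the bucket and a NULL @b is safe here.
	 */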
	nfsd_reply_cache_free_locked(NULL, rp, nn);
	nfsd_stats_rc_hits_inc(nn);
	rtn = RC_DROPIT;
	rp = found;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out_trace;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out_trace;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out_unlock; /* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
	}

out_trace:
	trace_nfsd_drc_found(nn, rqstp, rtn);
out_unlock:
	spin_unlock(&b->cache_lock);
out:
	return rtn;
}

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @rp: IN: DRC entry for this request
 * @cachetype: which cache to update
 * @statp: pointer to Reply's NFS status code, or NULL
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
		       int cachetype, __be32 *statp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
	struct nfsd_drc_bucket *b;
	int len;
	size_t bufsize = 0;

	if (!rp)
		return;

	b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);

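	/*
	 * Size of the reply data from the NFS status code to the end
	 * of the head kvec, converted from bytes to XDR words.
	 */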
	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp, nn);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}
	spin_lock(&b->cache_lock);
	nfsd_stats_drc_mem_usage_add(nn, bufsize);
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
}

static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	__be32 *p;

	p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
	if (unlikely(!p))
		return false;
	memcpy(p, data->iov_base, data->iov_len);
	xdr_commit_encode(&rqstp->rq_res_stream);
	return true;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
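 *
 * Example output (values are illustrative only):
 *
 *	max entries: 32768
 *	num entries: 4134
 *	hash buckets: 512
 *	mem usage: 4599056
 *	cache hits: 49938
 *	cache misses: 136363
 *	not cached: 1067938
 *	payload misses: 0
 *	longest chain len: 3
 *	cachesize at longest: 4135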
 */
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
					  nfsd_net_id);

	seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
	seq_printf(m, "num entries: %u\n",
		   atomic_read(&nn->num_drc_entries));
	seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
	seq_printf(m, "mem usage: %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE]));
	seq_printf(m, "cache hits: %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]));
	seq_printf(m, "cache misses: %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]));
	seq_printf(m, "not cached: %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]));
	seq_printf(m, "payload misses: %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]));
	seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
	return 0;
}