// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

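/*
 * Each bucket holds an rb-tree of entries keyed for lookup, an LRU list
 * ordered oldest-first for expiry, and a lock protecting both.
 */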
struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct kmem_cache *drc_slab;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:	 8192
 * 128MB:	11585
 * 256MB:	16384
 * 512MB:	23170
 *   1GB:	32768
 *   2GB:	46340
 *   4GB:	65536
 *   8GB:	92681
 *  16GB:      131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
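
/*
 * Worked example of the formula above, assuming 4KB pages
 * (PAGE_SHIFT == 12): with 1GB of low memory, low_pages is 262144,
 * int_sqrt(262144) is 512, and 16 * 512 = 8192 shifted left by
 * (12 - 10) = 2 bits gives 32768, matching the 1GB row in the table.
 */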

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
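
/*
 * For example, with the 1GB limit above (32768 entries) and a
 * TARGET_BUCKET_SIZE of 64, this yields 32768 / 64 = 512 buckets.
 */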

static struct nfsd_cacherep *
nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
		    struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}

static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	kmem_cache_free(drc_slab, rp);
}

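/*
 * Callers gather entries onto a local dispose list while holding the
 * bucket lock, then call this after dropping the lock so that the
 * actual freeing happens outside the critical section.
 */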
static unsigned long
nfsd_cacherep_dispose(struct list_head *dispose)
{
	struct nfsd_cacherep *rp;
	unsigned long freed = 0;

	while (!list_empty(dispose)) {
		rp = list_first_entry(dispose, struct nfsd_cacherep, c_lru);
		list_del(&rp->c_lru);
		nfsd_cacherep_free(rp);
		freed++;
	}
	return freed;
}

static void
nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			    struct nfsd_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&nn->num_drc_entries);
		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
	}
}

static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
			     struct nfsd_net *nn)
{
	nfsd_cacherep_unlink_locked(nn, b, rp);
	nfsd_cacherep_free(rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
		      struct nfsd_net *nn)
{
	spin_lock(&b->cache_lock);
	nfsd_cacherep_unlink_locked(nn, b, rp);
	spin_unlock(&b->cache_lock);
	nfsd_cacherep_free(rp);
}

int nfsd_drc_slab_create(void)
{
	drc_slab = kmem_cache_create("nfsd_drc",
				     sizeof(struct nfsd_cacherep), 0, 0, NULL);
	return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
	kmem_cache_destroy(drc_slab);
}

/**
 * nfsd_net_reply_cache_init - per net namespace reply cache set-up
 * @nn: nfsd_net being initialized
 *
 * Returns zero on success; otherwise a negative errno is returned.
 */
int nfsd_net_reply_cache_init(struct nfsd_net *nn)
{
	return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}

/**
 * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
 * @nn: nfsd_net being freed
 *
 */
void nfsd_net_reply_cache_destroy(struct nfsd_net *nn)
{
	nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
	unsigned int hashsize;
	unsigned int i;

	nn->max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&nn->num_drc_entries, 0);
	hashsize = nfsd_hashsize(nn->max_drc_entries);
	nn->maskbits = ilog2(hashsize);

	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
				   sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
	if (!nn->drc_hashtbl)
		return -ENOMEM;

	nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
						       nn->nfsd_name);
	if (!nn->nfsd_reply_cache_shrinker)
		goto out_shrinker;

	nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker->seeks = 1;
	nn->nfsd_reply_cache_shrinker->private_data = nn;

	shrinker_register(nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
	}
	nn->drc_hashsize = hashsize;

	return 0;
out_shrinker:
	kvfree(nn->drc_hashtbl);
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp;
	unsigned int i;

	shrinker_free(nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct list_head *head = &nn->drc_hashtbl[i].lru_head;

		while (!list_empty(head)) {
			rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
						     rp, nn);
		}
	}

	kvfree(nn->drc_hashtbl);
	nn->drc_hashtbl = NULL;
	nn->drc_hashsize = 0;
}

/*
 * Move a cache entry to the end of its bucket's LRU list and refresh
 * its timestamp.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

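/*
 * The bucket is derived solely from the request's XID, so the lookup
 * and update paths always resolve to the same bucket for a given entry.
 */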
static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{
	unsigned int hash = hash_32((__force u32)xid, nn->maskbits);

	return &nn->drc_hashtbl[hash];
}

/*
 * Remove up to @max expired entries from bucket @b and collect them on
 * the @dispose list. If @max is zero, do not limit the number of
 * removed entries.
 */
static void
nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			 unsigned int max, struct list_head *dispose)
{
	unsigned long expiry = jiffies - RC_EXPIRE;
	struct nfsd_cacherep *rp, *tmp;
	unsigned int freed = 0;

	lockdep_assert_held(&b->cache_lock);

	/* The bucket LRU is ordered oldest-first. */
	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;

		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
		    time_before(expiry, rp->c_timestamp))
			break;

		nfsd_cacherep_unlink_locked(nn, b, rp);
		list_add(&rp->c_lru, dispose);

		if (max && ++freed > max)
			break;
	}
}

/**
 * nfsd_reply_cache_count - count_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Returns the total number of entries in the duplicate reply cache. To
 * keep things simple and quick, this is not the number of expired entries
 * in the cache (ie, the number that would be removed by a call to
 * nfsd_reply_cache_scan).
 */
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = shrink->private_data;

	return atomic_read(&nn->num_drc_entries);
}

/**
 * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Free expired entries on each bucket's LRU list until we've released
 * nr_to_scan objects. Nothing will be released if the cache has not
 * exceeded its max_drc_entries limit.
 *
 * Returns the number of entries released by this call.
 */
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = shrink->private_data;
	unsigned long freed = 0;
	LIST_HEAD(dispose);
	unsigned int i;

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;

		spin_lock(&b->cache_lock);
		nfsd_prune_bucket_locked(nn, b, 0, &dispose);
		spin_unlock(&b->cache_lock);

		freed += nfsd_cacherep_dispose(&dispose);
		if (freed > sc->nr_to_scan)
			break;
	}

	trace_nfsd_drc_gc(nn, freed);
	return freed;
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

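/*
 * A full key match requires every field of c_key to be equal. Entries
 * that share an XID but differ in checksum suggest an XID being reused
 * for a different request; these are counted as payload misses.
 */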
static int
nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
		   const struct nfsd_cacherep *rp, struct nfsd_net *nn)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum) {
		nfsd_stats_payload_misses_inc(nn);
		trace_nfsd_drc_mismatch(nn, key, rp);
	}

	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry, or
 * @key itself after inserting it when no match exists.
 */
static struct nfsd_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
		  struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp, *ret = key;
	struct rb_node **p = &b->rb_head.rb_node,
		       *parent = NULL;
	unsigned int entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct nfsd_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp, nn);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > nn->longest_chain) {
		nn->longest_chain = entries;
		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
	} else if (entries == nn->longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		nn->longest_chain_cachesize = min_t(unsigned int,
				nn->longest_chain_cachesize,
				atomic_read(&nn->num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 * @cacherep: OUT: DRC entry for this request
 *
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a cache miss followed by an insert, a new entry
 * is preallocated before the bucket is searched; if a matching entry
 * is found instead, the preallocated entry is freed and the cached
 * one is used.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
{
	struct nfsd_net *nn;
	struct nfsd_cacherep *rp, *found;
	__wsum csum;
	struct nfsd_drc_bucket *b;
	int type = rqstp->rq_cachetype;
	unsigned long freed;
	LIST_HEAD(dispose);
	int rtn = RC_DOIT;

	if (type == RC_NOCACHE) {
		nfsd_stats_rc_nocache_inc();
		goto out;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	rp = nfsd_cacherep_alloc(rqstp, csum, nn);
	if (!rp)
		goto out;

	b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp)
		goto found_entry;
	*cacherep = rp;
	rp->c_state = RC_INPROG;
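	/* Opportunistically prune a few expired entries while the lock is held */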
	nfsd_prune_bucket_locked(nn, b, 3, &dispose);
	spin_unlock(&b->cache_lock);

	freed = nfsd_cacherep_dispose(&dispose);
	trace_nfsd_drc_gc(nn, freed);

	nfsd_stats_rc_misses_inc();
	atomic_inc(&nn->num_drc_entries);
	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
	goto out;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	nfsd_reply_cache_free_locked(NULL, rp, nn);
	nfsd_stats_rc_hits_inc();
	rtn = RC_DROPIT;
	rp = found;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out_trace;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out_trace;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out_unlock; /* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
	}

out_trace:
	trace_nfsd_drc_found(nn, rqstp, rtn);
out_unlock:
	spin_unlock(&b->cache_lock);
out:
	return rtn;
}

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @rp: IN: DRC entry for this request
 * @cachetype: which cache to update
 * @statp: pointer to Reply's NFS status code, or NULL
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
		       int cachetype, __be32 *statp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
	struct nfsd_drc_bucket *b;
	int len;
	size_t bufsize = 0;

	if (!rp)
		return;

	b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);

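	/*
	 * Reply length, in XDR words, measured from the NFS status code
	 * to the end of the head kvec.
	 */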
	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp, nn);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}
	spin_lock(&b->cache_lock);
	nfsd_stats_drc_mem_usage_add(nn, bufsize);
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec *vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
		       data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
					  nfsd_net_id);

	seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
	seq_printf(m, "num entries: %u\n",
		   atomic_read(&nn->num_drc_entries));
	seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
	seq_printf(m, "mem usage: %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
	seq_printf(m, "cache hits: %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
	seq_printf(m, "cache misses: %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
	seq_printf(m, "not cached: %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
	seq_printf(m, "payload misses: %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
	seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
	return 0;
}