1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* handling of writes to regular files and writing back to the server |
3 | * |
4 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. |
5 | * Written by David Howells (dhowells@redhat.com) |
6 | */ |
7 | |
8 | #include <linux/backing-dev.h> |
9 | #include <linux/slab.h> |
10 | #include <linux/fs.h> |
11 | #include <linux/pagemap.h> |
12 | #include <linux/writeback.h> |
13 | #include <linux/pagevec.h> |
14 | #include <linux/netfs.h> |
15 | #include "internal.h" |
16 | |
17 | static int afs_writepages_region(struct address_space *mapping, |
18 | struct writeback_control *wbc, |
19 | loff_t start, loff_t end, loff_t *_next, |
20 | bool max_one_loop); |
21 | |
22 | static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len, |
23 | loff_t i_size, bool caching); |
24 | |
25 | #ifdef CONFIG_AFS_FSCACHE |
26 | /* |
27 | * Mark a page as having been made dirty and thus needing writeback. We also |
28 | * need to pin the cache object to write back to. |
29 | */ |
30 | bool afs_dirty_folio(struct address_space *mapping, struct folio *folio) |
31 | { |
	return fscache_dirty_folio(mapping, folio,
				   afs_vnode_cache(AFS_FS_I(mapping->host)));
34 | } |
35 | static void afs_folio_start_fscache(bool caching, struct folio *folio) |
36 | { |
37 | if (caching) |
38 | folio_start_fscache(folio); |
39 | } |
40 | #else |
41 | static void afs_folio_start_fscache(bool caching, struct folio *folio) |
42 | { |
43 | } |
44 | #endif |
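
/* With CONFIG_AFS_FSCACHE=n there is no cache object to pin, so
 * afs_folio_start_fscache() compiles down to nothing and afs_dirty_folio()
 * is expected to be aliased to plain filemap_dirty_folio() (see internal.h).
 */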
45 | |
46 | /* |
47 | * Flush out a conflicting write. This may extend the write to the surrounding |
 * pages if they are also dirty and contiguous with the conflicting region.
49 | */ |
50 | static int afs_flush_conflicting_write(struct address_space *mapping, |
51 | struct folio *folio) |
52 | { |
53 | struct writeback_control wbc = { |
54 | .sync_mode = WB_SYNC_ALL, |
55 | .nr_to_write = LONG_MAX, |
56 | .range_start = folio_pos(folio), |
57 | .range_end = LLONG_MAX, |
58 | }; |
59 | loff_t next; |
60 | |
	return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX,
				     &next, true);
63 | } |
64 | |
65 | /* |
66 | * prepare to perform part of a write to a page |
67 | */ |
68 | int afs_write_begin(struct file *file, struct address_space *mapping, |
69 | loff_t pos, unsigned len, |
70 | struct page **_page, void **fsdata) |
71 | { |
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
73 | struct folio *folio; |
74 | unsigned long priv; |
75 | unsigned f, from; |
76 | unsigned t, to; |
77 | pgoff_t index; |
78 | int ret; |
79 | |
80 | _enter("{%llx:%llu},%llx,%x" , |
81 | vnode->fid.vid, vnode->fid.vnode, pos, len); |
82 | |
83 | /* Prefetch area to be written into the cache if we're caching this |
84 | * file. We need to do this before we get a lock on the page in case |
85 | * there's more than one writer competing for the same cache block. |
86 | */ |
87 | ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata); |
88 | if (ret < 0) |
89 | return ret; |
90 | |
91 | index = folio_index(folio); |
92 | from = pos - index * PAGE_SIZE; |
93 | to = from + len; |
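
	/* For example, assuming 4KiB pages, a write of 0x12c bytes at file
	 * position 0x1064 lands in the folio at index 1 and yields
	 * from = 0x64 and to = 0x190.
	 */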
94 | |
95 | try_again: |
96 | /* See if this page is already partially written in a way that we can |
97 | * merge the new write with. |
98 | */ |
99 | if (folio_test_private(folio)) { |
100 | priv = (unsigned long)folio_get_private(folio); |
101 | f = afs_folio_dirty_from(folio, priv); |
102 | t = afs_folio_dirty_to(folio, priv); |
103 | ASSERTCMP(f, <=, t); |
104 | |
105 | if (folio_test_writeback(folio)) { |
106 | trace_afs_folio_dirty(vnode, tracepoint_string("alrdy" ), folio); |
107 | folio_unlock(folio); |
108 | goto wait_for_writeback; |
109 | } |
110 | /* If the file is being filled locally, allow inter-write |
111 | * spaces to be merged into writes. If it's not, only write |
112 | * back what the user gives us. |
113 | */ |
114 | if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) && |
115 | (to < f || from > t)) |
116 | goto flush_conflicting_write; |
117 | } |
118 | |
	*_page = folio_file_page(folio, pos / PAGE_SIZE);
	_leave(" = 0");
121 | return 0; |
122 | |
123 | /* The previous write and this write aren't adjacent or overlapping, so |
124 | * flush the page out. |
125 | */ |
126 | flush_conflicting_write: |
127 | trace_afs_folio_dirty(vnode, tracepoint_string("confl" ), folio); |
128 | folio_unlock(folio); |
129 | |
130 | ret = afs_flush_conflicting_write(mapping, folio); |
131 | if (ret < 0) |
132 | goto error; |
133 | |
134 | wait_for_writeback: |
135 | ret = folio_wait_writeback_killable(folio); |
136 | if (ret < 0) |
137 | goto error; |
138 | |
139 | ret = folio_lock_killable(folio); |
140 | if (ret < 0) |
141 | goto error; |
142 | goto try_again; |
143 | |
144 | error: |
145 | folio_put(folio); |
146 | _leave(" = %d" , ret); |
147 | return ret; |
148 | } |
149 | |
150 | /* |
151 | * finalise part of a write to a page |
152 | */ |
153 | int afs_write_end(struct file *file, struct address_space *mapping, |
154 | loff_t pos, unsigned len, unsigned copied, |
155 | struct page *subpage, void *fsdata) |
156 | { |
157 | struct folio *folio = page_folio(subpage); |
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
159 | unsigned long priv; |
160 | unsigned int f, from = offset_in_folio(folio, pos); |
161 | unsigned int t, to = from + copied; |
162 | loff_t i_size, write_end_pos; |
163 | |
164 | _enter("{%llx:%llu},{%lx}" , |
165 | vnode->fid.vid, vnode->fid.vnode, folio_index(folio)); |
166 | |
167 | if (!folio_test_uptodate(folio)) { |
168 | if (copied < len) { |
169 | copied = 0; |
170 | goto out; |
171 | } |
172 | |
173 | folio_mark_uptodate(folio); |
174 | } |
175 | |
176 | if (copied == 0) |
177 | goto out; |
178 | |
179 | write_end_pos = pos + copied; |
180 | |
	i_size = i_size_read(&vnode->netfs.inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->netfs.inode);
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
189 | } |
190 | |
191 | if (folio_test_private(folio)) { |
192 | priv = (unsigned long)folio_get_private(folio); |
193 | f = afs_folio_dirty_from(folio, priv); |
194 | t = afs_folio_dirty_to(folio, priv); |
195 | if (from < f) |
196 | f = from; |
197 | if (to > t) |
198 | t = to; |
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
206 | } |
207 | |
208 | if (folio_mark_dirty(folio)) |
209 | _debug("dirtied %lx" , folio_index(folio)); |
210 | |
211 | out: |
212 | folio_unlock(folio); |
213 | folio_put(folio); |
214 | return copied; |
215 | } |
216 | |
217 | /* |
218 | * kill all the pages in the given range |
219 | */ |
220 | static void afs_kill_pages(struct address_space *mapping, |
221 | loff_t start, loff_t len) |
222 | { |
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
224 | struct folio *folio; |
225 | pgoff_t index = start / PAGE_SIZE; |
226 | pgoff_t last = (start + len - 1) / PAGE_SIZE, next; |
227 | |
228 | _enter("{%llx:%llu},%llx @%llx" , |
229 | vnode->fid.vid, vnode->fid.vnode, len, start); |
230 | |
231 | do { |
232 | _debug("kill %lx (to %lx)" , index, last); |
233 | |
234 | folio = filemap_get_folio(mapping, index); |
235 | if (IS_ERR(ptr: folio)) { |
236 | next = index + 1; |
237 | continue; |
238 | } |
239 | |
240 | next = folio_next_index(folio); |
241 | |
242 | folio_clear_uptodate(folio); |
243 | folio_end_writeback(folio); |
244 | folio_lock(folio); |
		generic_error_remove_page(mapping, &folio->page);
246 | folio_unlock(folio); |
247 | folio_put(folio); |
248 | |
249 | } while (index = next, index <= last); |
250 | |
251 | _leave("" ); |
252 | } |
253 | |
254 | /* |
255 | * Redirty all the pages in a given range. |
256 | */ |
257 | static void afs_redirty_pages(struct writeback_control *wbc, |
258 | struct address_space *mapping, |
259 | loff_t start, loff_t len) |
260 | { |
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
262 | struct folio *folio; |
263 | pgoff_t index = start / PAGE_SIZE; |
264 | pgoff_t last = (start + len - 1) / PAGE_SIZE, next; |
265 | |
266 | _enter("{%llx:%llu},%llx @%llx" , |
267 | vnode->fid.vid, vnode->fid.vnode, len, start); |
268 | |
269 | do { |
270 | _debug("redirty %llx @%llx" , len, start); |
271 | |
272 | folio = filemap_get_folio(mapping, index); |
273 | if (IS_ERR(ptr: folio)) { |
274 | next = index + 1; |
275 | continue; |
276 | } |
277 | |
278 | next = index + folio_nr_pages(folio); |
279 | folio_redirty_for_writepage(wbc, folio); |
280 | folio_end_writeback(folio); |
281 | folio_put(folio); |
282 | } while (index = next, index <= last); |
283 | |
284 | _leave("" ); |
285 | } |
286 | |
287 | /* |
288 | * completion of write to server |
289 | */ |
290 | static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len) |
291 | { |
292 | struct address_space *mapping = vnode->netfs.inode.i_mapping; |
293 | struct folio *folio; |
294 | pgoff_t end; |
295 | |
296 | XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); |
297 | |
298 | _enter("{%llx:%llu},{%x @%llx}" , |
299 | vnode->fid.vid, vnode->fid.vnode, len, start); |
300 | |
301 | rcu_read_lock(); |
302 | |
303 | end = (start + len - 1) / PAGE_SIZE; |
304 | xas_for_each(&xas, folio, end) { |
305 | if (!folio_test_writeback(folio)) { |
306 | kdebug("bad %x @%llx page %lx %lx" , |
307 | len, start, folio_index(folio), end); |
308 | ASSERT(folio_test_writeback(folio)); |
309 | } |
310 | |
311 | trace_afs_folio_dirty(vnode, tracepoint_string("clear" ), folio); |
312 | folio_detach_private(folio); |
313 | folio_end_writeback(folio); |
314 | } |
315 | |
316 | rcu_read_unlock(); |
317 | |
318 | afs_prune_wb_keys(vnode); |
319 | _leave("" ); |
320 | } |
321 | |
322 | /* |
323 | * Find a key to use for the writeback. We cached the keys used to author the |
324 | * writes on the vnode. *_wbk will contain the last writeback key used or NULL |
325 | * and we need to start from there if it's set. |
326 | */ |
327 | static int afs_get_writeback_key(struct afs_vnode *vnode, |
328 | struct afs_wb_key **_wbk) |
329 | { |
330 | struct afs_wb_key *wbk = NULL; |
331 | struct list_head *p; |
332 | int ret = -ENOKEY, ret2; |
333 | |
	spin_lock(&vnode->wb_lock);
335 | if (*_wbk) |
336 | p = (*_wbk)->vnode_link.next; |
337 | else |
338 | p = vnode->wb_keys.next; |
339 | |
340 | while (p != &vnode->wb_keys) { |
341 | wbk = list_entry(p, struct afs_wb_key, vnode_link); |
342 | _debug("wbk %u" , key_serial(wbk->key)); |
343 | ret2 = key_validate(key: wbk->key); |
344 | if (ret2 == 0) { |
345 | refcount_inc(r: &wbk->usage); |
346 | _debug("USE WB KEY %u" , key_serial(wbk->key)); |
347 | break; |
348 | } |
349 | |
350 | wbk = NULL; |
351 | if (ret == -ENOKEY) |
352 | ret = ret2; |
353 | p = p->next; |
354 | } |
355 | |
	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
361 | } |
362 | |
363 | static void afs_store_data_success(struct afs_operation *op) |
364 | { |
365 | struct afs_vnode *vnode = op->file[0].vnode; |
366 | |
367 | op->ctime = op->file[0].scb.status.mtime_client; |
368 | afs_vnode_commit_status(op, &op->file[0]); |
369 | if (op->error == 0) { |
370 | if (!op->store.laundering) |
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
374 | } |
375 | } |
376 | |
377 | static const struct afs_operation_ops afs_store_data_operation = { |
378 | .issue_afs_rpc = afs_fs_store_data, |
379 | .issue_yfs_rpc = yfs_fs_store_data, |
380 | .success = afs_store_data_success, |
381 | }; |
382 | |
383 | /* |
384 | * write to a file |
385 | */ |
386 | static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos, |
387 | bool laundering) |
388 | { |
389 | struct afs_operation *op; |
390 | struct afs_wb_key *wbk = NULL; |
	loff_t size = iov_iter_count(iter);
392 | int ret = -ENOKEY; |
393 | |
394 | _enter("%s{%llx:%llu.%u},%llx,%llx" , |
395 | vnode->volume->name, |
396 | vnode->fid.vid, |
397 | vnode->fid.vnode, |
398 | vnode->fid.unique, |
399 | size, pos); |
400 | |
	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
404 | return ret; |
405 | } |
406 | |
407 | op = afs_alloc_operation(wbk->key, vnode->volume); |
	if (IS_ERR(op)) {
409 | afs_put_wb_key(wbk); |
410 | return -ENOMEM; |
411 | } |
412 | |
	afs_op_set_vnode(op, 0, vnode);
414 | op->file[0].dv_delta = 1; |
415 | op->file[0].modification = true; |
416 | op->store.pos = pos; |
417 | op->store.size = size; |
418 | op->store.laundering = laundering; |
419 | op->flags |= AFS_OPERATION_UNINTR; |
420 | op->ops = &afs_store_data_operation; |
421 | |
422 | try_next_key: |
423 | afs_begin_vnode_operation(op); |
424 | |
425 | op->store.write_iter = iter; |
426 | op->store.i_size = max(pos + size, vnode->netfs.remote_i_size); |
	op->mtime = inode_get_mtime(&vnode->netfs.inode);
428 | |
429 | afs_wait_for_operation(op); |
430 | |
431 | switch (op->error) { |
432 | case -EACCES: |
433 | case -EPERM: |
434 | case -ENOKEY: |
435 | case -EKEYEXPIRED: |
436 | case -EKEYREJECTED: |
437 | case -EKEYREVOKED: |
438 | _debug("next" ); |
439 | |
440 | ret = afs_get_writeback_key(vnode, wbk: &wbk); |
441 | if (ret == 0) { |
442 | key_put(key: op->key); |
443 | op->key = key_get(key: wbk->key); |
444 | goto try_next_key; |
445 | } |
446 | break; |
447 | } |
448 | |
449 | afs_put_wb_key(wbk); |
450 | _leave(" = %d" , op->error); |
451 | return afs_put_operation(op); |
452 | } |
453 | |
454 | /* |
455 | * Extend the region to be written back to include subsequent contiguously |
456 | * dirty pages if possible, but don't sleep while doing so. |
457 | * |
458 | * If this page holds new content, then we can include filler zeros in the |
459 | * writeback. |
460 | */ |
461 | static void afs_extend_writeback(struct address_space *mapping, |
462 | struct afs_vnode *vnode, |
463 | long *_count, |
464 | loff_t start, |
465 | loff_t max_len, |
466 | bool new_content, |
467 | bool caching, |
468 | unsigned int *_len) |
469 | { |
470 | struct folio_batch fbatch; |
471 | struct folio *folio; |
472 | unsigned long priv; |
473 | unsigned int psize, filler = 0; |
474 | unsigned int f, t; |
475 | loff_t len = *_len; |
476 | pgoff_t index = (start + len) / PAGE_SIZE; |
477 | bool stop = true; |
478 | unsigned int i; |
479 | |
480 | XA_STATE(xas, &mapping->i_pages, index); |
	folio_batch_init(&fbatch);
482 | |
483 | do { |
484 | /* Firstly, we gather up a batch of contiguous dirty pages |
485 | * under the RCU read lock - but we can't clear the dirty flags |
486 | * there if any of those pages are mapped. |
487 | */ |
488 | rcu_read_lock(); |
489 | |
490 | xas_for_each(&xas, folio, ULONG_MAX) { |
491 | stop = true; |
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
495 | break; |
496 | if (folio_index(folio) != index) |
497 | break; |
498 | |
499 | if (!folio_try_get_rcu(folio)) { |
				xas_reset(&xas);
501 | continue; |
502 | } |
503 | |
504 | /* Has the page moved or been split? */ |
505 | if (unlikely(folio != xas_reload(&xas))) { |
506 | folio_put(folio); |
507 | break; |
508 | } |
509 | |
510 | if (!folio_trylock(folio)) { |
511 | folio_put(folio); |
512 | break; |
513 | } |
514 | if (!folio_test_dirty(folio) || |
515 | folio_test_writeback(folio) || |
516 | folio_test_fscache(folio)) { |
517 | folio_unlock(folio); |
518 | folio_put(folio); |
519 | break; |
520 | } |
521 | |
522 | psize = folio_size(folio); |
523 | priv = (unsigned long)folio_get_private(folio); |
524 | f = afs_folio_dirty_from(folio, priv); |
525 | t = afs_folio_dirty_to(folio, priv); |
526 | if (f != 0 && !new_content) { |
527 | folio_unlock(folio); |
528 | folio_put(folio); |
529 | break; |
530 | } |
531 | |
532 | len += filler + t; |
533 | filler = psize - t; |
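			/* len now extends to offset t in this folio; the
			 * remaining psize - t bytes are remembered as filler
			 * and only get counted if a later folio is merged in
			 * after them.
			 */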
534 | if (len >= max_len || *_count <= 0) |
535 | stop = true; |
536 | else if (t == psize || new_content) |
537 | stop = false; |
538 | |
539 | index += folio_nr_pages(folio); |
			if (!folio_batch_add(&fbatch, folio))
541 | break; |
542 | if (stop) |
543 | break; |
544 | } |
545 | |
546 | if (!stop) |
547 | xas_pause(&xas); |
548 | rcu_read_unlock(); |
549 | |
550 | /* Now, if we obtained any folios, we can shift them to being |
551 | * writable and mark them for caching. |
552 | */ |
		if (!folio_batch_count(&fbatch))
554 | break; |
555 | |
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);
559 | |
560 | if (!folio_clear_dirty_for_io(folio)) |
561 | BUG(); |
562 | if (folio_start_writeback(folio)) |
563 | BUG(); |
564 | afs_folio_start_fscache(caching, folio); |
565 | |
566 | *_count -= folio_nr_pages(folio); |
567 | folio_unlock(folio); |
568 | } |
569 | |
		folio_batch_release(&fbatch);
571 | cond_resched(); |
572 | } while (!stop); |
573 | |
574 | *_len = len; |
575 | } |
576 | |
577 | /* |
578 | * Synchronously write back the locked page and any subsequent non-locked dirty |
579 | * pages. |
580 | */ |
581 | static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping, |
582 | struct writeback_control *wbc, |
583 | struct folio *folio, |
584 | loff_t start, loff_t end) |
585 | { |
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
587 | struct iov_iter iter; |
588 | unsigned long priv; |
589 | unsigned int offset, to, len, max_len; |
	loff_t i_size = i_size_read(&vnode->netfs.inode);
591 | bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); |
592 | bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode)); |
593 | long count = wbc->nr_to_write; |
594 | int ret; |
595 | |
596 | _enter(",%lx,%llx-%llx" , folio_index(folio), start, end); |
597 | |
598 | if (folio_start_writeback(folio)) |
599 | BUG(); |
600 | afs_folio_start_fscache(caching, folio); |
601 | |
602 | count -= folio_nr_pages(folio); |
603 | |
604 | /* Find all consecutive lockable dirty pages that have contiguous |
605 | * written regions, stopping when we find a page that is not |
606 | * immediately lockable, is not dirty or is missing, or we reach the |
607 | * end of the range. |
608 | */ |
609 | priv = (unsigned long)folio_get_private(folio); |
610 | offset = afs_folio_dirty_from(folio, priv); |
611 | to = afs_folio_dirty_to(folio, priv); |
612 | trace_afs_folio_dirty(vnode, tracepoint_string("store" ), folio); |
613 | |
614 | len = to - offset; |
615 | start += offset; |
616 | if (start < i_size) { |
617 | /* Trim the write to the EOF; the extra data is ignored. Also |
618 | * put an upper limit on the size of a single storedata op. |
619 | */ |
620 | max_len = 65536 * 4096; |
621 | max_len = min_t(unsigned long long, max_len, end - start + 1); |
622 | max_len = min_t(unsigned long long, max_len, i_size - start); |
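
		/* For example, with 4KiB pages the cap is 65536 pages, i.e.
		 * 256MiB per FS.StoreData op, further trimmed to the EOF and
		 * to the end of the requested range.
		 */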
623 | |
624 | if (len < max_len && |
625 | (to == folio_size(folio) || new_content)) |
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content,
					     caching, &len);
629 | len = min_t(loff_t, len, max_len); |
630 | } |
631 | |
632 | /* We now have a contiguous set of dirty pages, each with writeback |
633 | * set; the first page is still locked at this point, but all the rest |
634 | * have been unlocked. |
635 | */ |
636 | folio_unlock(folio); |
637 | |
638 | if (start < i_size) { |
639 | _debug("write back %x @%llx [%llx]" , len, start, i_size); |
640 | |
641 | /* Speculatively write to the cache. We have to fix this up |
642 | * later if the store fails. |
643 | */ |
644 | afs_write_to_cache(vnode, start, len, i_size, caching); |
645 | |
		iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);
650 | |
651 | /* The dirty region was entirely beyond the EOF. */ |
652 | fscache_clear_page_bits(mapping, start, len, caching); |
653 | afs_pages_written_back(vnode, start, len); |
654 | ret = 0; |
655 | } |
656 | |
657 | switch (ret) { |
658 | case 0: |
659 | wbc->nr_to_write = count; |
660 | ret = len; |
661 | break; |
662 | |
663 | default: |
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
665 | fallthrough; |
666 | case -EACCES: |
667 | case -EPERM: |
668 | case -ENOKEY: |
669 | case -EKEYEXPIRED: |
670 | case -EKEYREJECTED: |
671 | case -EKEYREVOKED: |
672 | case -ENETRESET: |
673 | afs_redirty_pages(wbc, mapping, start, len); |
		mapping_set_error(mapping, ret);
675 | break; |
676 | |
677 | case -EDQUOT: |
678 | case -ENOSPC: |
679 | afs_redirty_pages(wbc, mapping, start, len); |
		mapping_set_error(mapping, -ENOSPC);
681 | break; |
682 | |
683 | case -EROFS: |
684 | case -EIO: |
685 | case -EREMOTEIO: |
686 | case -EFBIG: |
687 | case -ENOENT: |
688 | case -ENOMEDIUM: |
689 | case -ENXIO: |
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
693 | break; |
694 | } |
695 | |
696 | _leave(" = %d" , ret); |
697 | return ret; |
698 | } |
699 | |
700 | /* |
701 | * write a region of pages back to the server |
702 | */ |
703 | static int afs_writepages_region(struct address_space *mapping, |
704 | struct writeback_control *wbc, |
705 | loff_t start, loff_t end, loff_t *_next, |
706 | bool max_one_loop) |
707 | { |
708 | struct folio *folio; |
709 | struct folio_batch fbatch; |
710 | ssize_t ret; |
711 | unsigned int i; |
712 | int n, skips = 0; |
713 | |
714 | _enter("%llx,%llx," , start, end); |
715 | folio_batch_init(fbatch: &fbatch); |
716 | |
717 | do { |
718 | pgoff_t index = start / PAGE_SIZE; |
719 | |
		n = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
					   PAGECACHE_TAG_DIRTY, &fbatch);
722 | |
723 | if (!n) |
724 | break; |
725 | for (i = 0; i < n; i++) { |
726 | folio = fbatch.folios[i]; |
727 | start = folio_pos(folio); /* May regress with THPs */ |
728 | |
729 | _debug("wback %lx" , folio_index(folio)); |
730 | |
731 | /* At this point we hold neither the i_pages lock nor the |
732 | * page lock: the page may be truncated or invalidated |
733 | * (changing page->mapping to NULL), or even swizzled |
734 | * back from swapper_space to tmpfs file mapping |
735 | */ |
736 | try_again: |
737 | if (wbc->sync_mode != WB_SYNC_NONE) { |
738 | ret = folio_lock_killable(folio); |
739 | if (ret < 0) { |
					folio_batch_release(&fbatch);
741 | return ret; |
742 | } |
743 | } else { |
744 | if (!folio_trylock(folio)) |
745 | continue; |
746 | } |
747 | |
748 | if (folio->mapping != mapping || |
749 | !folio_test_dirty(folio)) { |
750 | start += folio_size(folio); |
751 | folio_unlock(folio); |
752 | continue; |
753 | } |
754 | |
755 | if (folio_test_writeback(folio) || |
756 | folio_test_fscache(folio)) { |
757 | folio_unlock(folio); |
758 | if (wbc->sync_mode != WB_SYNC_NONE) { |
759 | folio_wait_writeback(folio); |
760 | #ifdef CONFIG_AFS_FSCACHE |
761 | folio_wait_fscache(folio); |
762 | #endif |
763 | goto try_again; |
764 | } |
765 | |
766 | start += folio_size(folio); |
767 | if (wbc->sync_mode == WB_SYNC_NONE) { |
768 | if (skips >= 5 || need_resched()) { |
769 | *_next = start; |
						folio_batch_release(&fbatch);
						_leave(" = 0 [%llx]", *_next);
772 | return 0; |
773 | } |
774 | skips++; |
775 | } |
776 | continue; |
777 | } |
778 | |
779 | if (!folio_clear_dirty_for_io(folio)) |
780 | BUG(); |
781 | ret = afs_write_back_from_locked_folio(mapping, wbc, |
782 | folio, start, end); |
783 | if (ret < 0) { |
784 | _leave(" = %zd" , ret); |
785 | folio_batch_release(fbatch: &fbatch); |
786 | return ret; |
787 | } |
788 | |
789 | start += ret; |
790 | } |
791 | |
		folio_batch_release(&fbatch);
793 | cond_resched(); |
794 | } while (wbc->nr_to_write > 0); |
795 | |
796 | *_next = start; |
797 | _leave(" = 0 [%llx]" , *_next); |
798 | return 0; |
799 | } |
800 | |
801 | /* |
802 | * write some of the pending data back to the server |
803 | */ |
804 | int afs_writepages(struct address_space *mapping, |
805 | struct writeback_control *wbc) |
806 | { |
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
808 | loff_t start, next; |
809 | int ret; |
810 | |
811 | _enter("" ); |
812 | |
813 | /* We have to be careful as we can end up racing with setattr() |
814 | * truncating the pagecache since the caller doesn't take a lock here |
815 | * to prevent it. |
816 | */ |
817 | if (wbc->sync_mode == WB_SYNC_ALL) |
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
820 | return 0; |
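
	/* For cyclic writeback we resume where the last pass stopped and then
	 * wrap round to the start of the file: e.g. a pass beginning at
	 * writeback_index 100 covers pages 100 onwards first and then pages
	 * 0-99.
	 */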
821 | |
822 | if (wbc->range_cyclic) { |
823 | start = mapping->writeback_index * PAGE_SIZE; |
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX,
					    &next, false);
826 | if (ret == 0) { |
827 | mapping->writeback_index = next / PAGE_SIZE; |
828 | if (start > 0 && wbc->nr_to_write > 0) { |
				ret = afs_writepages_region(mapping, wbc, 0,
							    start, &next, false);
831 | if (ret == 0) |
832 | mapping->writeback_index = |
833 | next / PAGE_SIZE; |
834 | } |
835 | } |
836 | } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { |
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX,
					    &next, false);
839 | if (wbc->nr_to_write > 0 && ret == 0) |
840 | mapping->writeback_index = next / PAGE_SIZE; |
841 | } else { |
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end,
					    &next, false);
845 | } |
846 | |
	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
849 | return ret; |
850 | } |
851 | |
852 | /* |
853 | * write to an AFS file |
854 | */ |
855 | ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) |
856 | { |
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
858 | struct afs_file *af = iocb->ki_filp->private_data; |
859 | ssize_t result; |
	size_t count = iov_iter_count(from);
861 | |
862 | _enter("{%llx:%llu},{%zu}," , |
863 | vnode->fid.vid, vnode->fid.vnode, count); |
864 | |
865 | if (IS_SWAPFILE(&vnode->netfs.inode)) { |
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
868 | return -EBUSY; |
869 | } |
870 | |
871 | if (!count) |
872 | return 0; |
873 | |
874 | result = afs_validate(vnode, af->key); |
875 | if (result < 0) |
876 | return result; |
877 | |
878 | result = generic_file_write_iter(iocb, from); |
879 | |
880 | _leave(" = %zd" , result); |
881 | return result; |
882 | } |
883 | |
884 | /* |
885 | * flush any dirty pages for this process, and check for write errors. |
886 | * - the return status from this call provides a reliable indication of |
887 | * whether any write errors occurred for this process. |
888 | */ |
889 | int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) |
890 | { |
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
892 | struct afs_file *af = file->private_data; |
893 | int ret; |
894 | |
895 | _enter("{%llx:%llu},{n=%pD},%d" , |
896 | vnode->fid.vid, vnode->fid.vnode, file, |
897 | datasync); |
898 | |
899 | ret = afs_validate(vnode, af->key); |
900 | if (ret < 0) |
901 | return ret; |
902 | |
903 | return file_write_and_wait_range(file, start, end); |
904 | } |
905 | |
906 | /* |
907 | * notification that a previously read-only page is about to become writable |
908 | * - if it returns an error, the caller will deliver a bus error signal |
909 | */ |
910 | vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) |
911 | { |
912 | struct folio *folio = page_folio(vmf->page); |
913 | struct file *file = vmf->vma->vm_file; |
	struct inode *inode = file_inode(file);
915 | struct afs_vnode *vnode = AFS_FS_I(inode); |
916 | struct afs_file *af = file->private_data; |
917 | unsigned long priv; |
918 | vm_fault_t ret = VM_FAULT_RETRY; |
919 | |
920 | _enter("{{%llx:%llu}},{%lx}" , vnode->fid.vid, vnode->fid.vnode, folio_index(folio)); |
921 | |
922 | afs_validate(vnode, af->key); |
923 | |
	sb_start_pagefault(inode->i_sb);
925 | |
926 | /* Wait for the page to be written to the cache before we allow it to |
927 | * be modified. We then assume the entire page will need writing back. |
928 | */ |
929 | #ifdef CONFIG_AFS_FSCACHE |
930 | if (folio_test_fscache(folio) && |
931 | folio_wait_fscache_killable(folio) < 0) |
932 | goto out; |
933 | #endif |
934 | |
935 | if (folio_wait_writeback_killable(folio)) |
936 | goto out; |
937 | |
938 | if (folio_lock_killable(folio) < 0) |
939 | goto out; |
940 | |
941 | /* We mustn't change folio->private until writeback is complete as that |
942 | * details the portion of the page we need to write back and we might |
943 | * need to redirty the page if there's a problem. |
944 | */ |
945 | if (folio_wait_writeback_killable(folio) < 0) { |
946 | folio_unlock(folio); |
947 | goto out; |
948 | } |
949 | |
	priv = afs_folio_dirty(folio, 0, folio_size(folio));
951 | priv = afs_folio_dirty_mmapped(priv); |
952 | if (folio_test_private(folio)) { |
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
958 | } |
959 | file_update_time(file); |
960 | |
961 | ret = VM_FAULT_LOCKED; |
962 | out: |
	sb_end_pagefault(inode->i_sb);
964 | return ret; |
965 | } |
966 | |
967 | /* |
 * Prune the keys cached for writeback.  vnode->wb_lock is taken here, not by
 * the caller.
969 | */ |
970 | void afs_prune_wb_keys(struct afs_vnode *vnode) |
971 | { |
972 | LIST_HEAD(graveyard); |
973 | struct afs_wb_key *wbk, *tmp; |
974 | |
975 | /* Discard unused keys */ |
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
991 | afs_put_wb_key(wbk); |
992 | } |
993 | } |
994 | |
995 | /* |
996 | * Clean up a page during invalidation. |
997 | */ |
998 | int afs_launder_folio(struct folio *folio) |
999 | { |
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
1001 | struct iov_iter iter; |
1002 | struct bio_vec bv; |
1003 | unsigned long priv; |
1004 | unsigned int f, t; |
1005 | int ret = 0; |
1006 | |
1007 | _enter("{%lx}" , folio->index); |
1008 | |
1009 | priv = (unsigned long)folio_get_private(folio); |
1010 | if (folio_clear_dirty_for_io(folio)) { |
1011 | f = 0; |
1012 | t = folio_size(folio); |
1013 | if (folio_test_private(folio)) { |
1014 | f = afs_folio_dirty_from(folio, priv); |
1015 | t = afs_folio_dirty_to(folio, priv); |
1016 | } |
1017 | |
		bvec_set_folio(&bv, folio, t - f, f);
		iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, bv.bv_len);

		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
1023 | } |
1024 | |
1025 | trace_afs_folio_dirty(vnode, tracepoint_string("laundered" ), folio); |
1026 | folio_detach_private(folio); |
1027 | folio_wait_fscache(folio); |
1028 | return ret; |
1029 | } |
1030 | |
1031 | /* |
1032 | * Deal with the completion of writing the data to the cache. |
1033 | */ |
1034 | static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error, |
1035 | bool was_async) |
1036 | { |
1037 | struct afs_vnode *vnode = priv; |
1038 | |
1039 | if (IS_ERR_VALUE(transferred_or_error) && |
1040 | transferred_or_error != -ENOBUFS) |
		afs_invalidate_cache(vnode, 0);
1042 | } |
1043 | |
1044 | /* |
1045 | * Save the write to the cache also. |
1046 | */ |
1047 | static void afs_write_to_cache(struct afs_vnode *vnode, |
1048 | loff_t start, size_t len, loff_t i_size, |
1049 | bool caching) |
1050 | { |
	fscache_write_to_cache(afs_vnode_cache(vnode),
			       vnode->netfs.inode.i_mapping, start, len, i_size,
			       afs_write_to_cache_done, vnode, caching);
1054 | } |
1055 | |