// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_discard.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog		*log)
{
	struct xlog_ticket	*tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	tic->t_iclog_hdrs = 0;
	return tic;
}
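
/*
 * Illustrative sketch of the "stealing" the zeroed reservation enables
 * (this is just the accounting from xlog_cil_insert_items() below, not a
 * new mechanism): the first transaction to commit into an empty CIL
 * donates its unit reservation to the CIL ticket:
 *
 *	if (test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
 *		ctx_res = ctx->ticket->t_unit_res;
 *	cilpcp->space_reserved += ctx_res;		// credited to the CIL
 *	tp->t_ticket->t_curr_res -= ctx_res + len;	// debited from the tx
 */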

static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
	struct xlog	*log = cil->xc_log;

	atomic_set(&cil->xc_iclog_hdrs,
		   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
		    (log->l_iclog_size - log->l_iclog_hsize)));
}
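
/*
 * Worked example with hypothetical geometry (the real values come from
 * the log geometry at mount time): for 32 KiB iclogs with a 512 byte
 * header and a blocking limit of 1 MiB, this reserves
 * 1048576 / (32768 - 512) = 32 iclog headers' worth of split-region
 * header space for the checkpoint push to steal from committers.
 */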

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
	struct xfs_cil		*cil,
	struct xfs_log_item	*lip)
{
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item	*lip)
{
	return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}
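
/*
 * Hypothetical usage sketch: a transaction commit path can use this to
 * avoid re-logging state that is already guaranteed to land in the same
 * checkpoint, e.g.:
 *
 *	if (xfs_log_item_in_current_chkpt(lip))
 *		return;		// change already covered by this checkpoint
 *
 * As the comment above notes, this is only non-racy while CIL flushing
 * is locked out, i.e. from within transaction commit.
 */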

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
	INIT_LIST_HEAD(&ctx->log_items);
	INIT_LIST_HEAD(&ctx->lv_chain);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}

/*
 * Aggregate the CIL per cpu structures into global counts, lists, etc and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing
 * else will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

		ctx->ticket->t_curr_res += cilpcp->space_reserved;
		cilpcp->space_reserved = 0;

		if (!list_empty(&cilpcp->busy_extents)) {
			list_splice_init(&cilpcp->busy_extents,
					&ctx->busy_extents.extent_list);
		}
		if (!list_empty(&cilpcp->log_items))
			list_splice_init(&cilpcp->log_items, &ctx->log_items);

		/*
		 * We're in the middle of switching cil contexts. Reset the
		 * counter we use to detect when the current context is nearing
		 * full.
		 */
		cilpcp->space_used = 0;
	}
}

/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;
	int			count = 0;

	/* Trigger atomic updates then aggregate only for the first caller */
	if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
		return;

	/*
	 * We can race with other cpus setting cil_pcpmask. However, we've
	 * atomically cleared PCP_SPACE which forces other threads to add to
	 * the global space used count. cil_pcpmask is a superset of cilpcp
	 * structures that could have a nonzero space_used.
	 */
	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		int	old, prev;

		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		do {
			old = cilpcp->space_used;
			prev = cmpxchg(&cilpcp->space_used, old, 0);
		} while (old != prev);
		count += old;
	}
	atomic_add(count, &ctx->space_used);
}
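
/*
 * The cmpxchg() loop above is the usual lockless read-and-zero pattern:
 * a racing transaction commit may add to space_used between our read
 * and the exchange, in which case cmpxchg() returns a value other than
 * the one we sampled and we simply retry. A minimal sketch of the same
 * idiom on a plain int counter:
 *
 *	int old;
 *	do {
 *		old = READ_ONCE(*counter);
 *	} while (cmpxchg(counter, old, 0) != old);
 *	total += old;
 */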

static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	xlog_cil_set_iclog_hdr_count(cil);
	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
	set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	xlog_cil_set_iclog_hdr_count(log->l_cilp);
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
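
/*
 * Worked example (structure sizes are illustrative; the real ones are
 * architecture dependent): if struct xfs_log_vec is 56 bytes and struct
 * xfs_log_iovec is 16 bytes, then for niovecs = 3 this returns
 * round_up(56 + 3 * 16, 8) = 104 bytes of header space preceding the
 * 64-bit aligned data region of the buffer.
 */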

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * as it is needed, and this is the buffer that is reallocated to match the
 * size of the incoming modification. Then during the formatting of the item we
 * can swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it at
 * that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned. We'll need to account
		 * for that slack space here.
		 *
		 * We also add the xlog_op_header to each region when
		 * formatting, but that's not accounted to the size of the item
		 * at this point. Hence we'll need an additional number of
		 * bytes for each vector to hold an opheader.
		 *
		 * Then round nbytes up to 64-bit alignment so that the initial
		 * buffer alignment is easy to calculate and verify.
		 */
		nbytes += niovecs *
			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kvzalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);
			lv = xlog_kvmalloc(buf_size);

			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			INIT_LIST_HEAD(&lv->lv_list);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space it will consume, and if it is a new item pin it as well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
		*diff_len += lv->lv_bytes;

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
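
/*
 * Accounting example (numbers are illustrative): relogging an item whose
 * previous log vector was 128 bytes with a new 160 byte vector yields
 * *diff_len += 160 - 128 = 32, so the CIL context only grows by the
 * delta between the two formatted sizes, not by the full size of the
 * new vector.
 */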

/*
 * Format log items into flat buffers.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
	}
}

/*
 * The use of lockless waitqueue_active() requires that the caller has
 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 * can be done by either holding the push lock or the context lock.
 */
static inline bool
xlog_cil_over_hard_limit(
	struct xlog	*log,
	int32_t		space_used)
{
	if (waitqueue_active(&log->l_cilp->xc_push_wait))
		return true;
	if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
		return true;
	return false;
}
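
/*
 * Usage sketch: the two callers in this file satisfy the serialisation
 * requirement differently - xlog_cil_insert_items() holds
 * cil->xc_ctx_lock in read mode, while xlog_cil_push_background() calls
 * this with cil->xc_push_lock held. A hypothetical new caller would
 * need to do the same, e.g.:
 *
 *	down_read(&cil->xc_ctx_lock);
 *	if (xlog_cil_over_hard_limit(log, space_used))
 *		...
 *	up_read(&cil->xc_ctx_lock);
 */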

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	uint32_t		released_space)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
	int			space_used;
	int			order;
	unsigned int		cpu_nr;
	struct xlog_cil_pcp	*cilpcp;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len);

	/*
	 * Subtract the space released by intent cancelation from the space we
	 * consumed so that we remove it from the CIL space and add it back to
	 * the current transaction reservation context.
	 */
	len -= released_space;

	/*
	 * Grab the per-cpu pointer for the CIL before we start any accounting.
	 * That ensures that we are running with pre-emption disabled and so we
	 * can't be scheduled away between split sample/update operations that
	 * are done without outside locking to serialise them.
	 */
	cpu_nr = get_cpu();
	cilpcp = this_cpu_ptr(cil->xc_pcp);

	/* Tell the future push that there was work added by this CPU. */
	if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
		cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);

	/*
	 * We need to take the CIL checkpoint unit reservation on the first
	 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
	 * unnecessarily do an atomic op in the fast path here. We can clear the
	 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
	 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		ctx_res = ctx->ticket->t_unit_res;

	/*
	 * Check if we need to steal iclog headers. atomic_read() is not a
	 * locked atomic operation, so we can check the value before we do any
	 * real atomic ops in the fast path. If we've already taken the CIL unit
	 * reservation from this commit, we've already got one iclog header
	 * space reserved so we have to account for that otherwise we risk
	 * overrunning the reservation on this ticket.
	 *
	 * If the CIL is already at the hard limit, we might need more header
	 * space than originally reserved. So steal more header space from
	 * every commit that occurs once we are over the hard limit to ensure
	 * the CIL push won't run out of reservation space.
	 *
	 * This can steal more than we need, but that's OK.
	 *
	 * The cil->xc_ctx_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
	 */
	space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
	if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
	    xlog_cil_over_hard_limit(log, space_used)) {
		split_res = log->l_iclog_hsize +
					sizeof(struct xlog_op_header);
		if (ctx_res)
			ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
		else
			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
	}
	cilpcp->space_reserved += ctx_res;

	/*
	 * Accurately account when over the soft limit, otherwise fold the
	 * percpu count into the global count if over the per-cpu threshold.
	 */
	if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
		atomic_add(len, &ctx->space_used);
	} else if (cilpcp->space_used + len >
			(XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
		space_used = atomic_add_return(cilpcp->space_used + len,
						&ctx->space_used);
		cilpcp->space_used = 0;

		/*
		 * If we just transitioned over the soft limit, we need to
		 * transition to the global atomic counter.
		 */
		if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
			xlog_cil_insert_pcp_aggregate(cil, ctx);
	} else {
		cilpcp->space_used += len;
	}
	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

	/*
	 * Now update the order of everything modified in the transaction
	 * and insert items into the CIL if they aren't already there.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	order = atomic_inc_return(&ctx->order_id);
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		lip->li_order_id = order;
		if (!list_empty(&lip->li_cil))
			continue;
		list_add_tail(&lip->li_cil, &cilpcp->log_items);
	}
	put_cpu();

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	tp->t_ticket->t_curr_res -= ctx_res + len;
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}
}
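
/*
 * Worked example of the iclog header stealing above: if the committing
 * ticket reserved t_iclog_hdrs = 4 headers (illustrative), a commit
 * that also donated the CIL unit reservation (ctx_res != 0, which
 * already covers one header) steals 3 * split_res bytes, while any
 * other commit over the threshold steals 4 * split_res bytes. Either
 * way xc_iclog_hdrs is decremented by 4, so stealing stops once the
 * CIL ticket has accumulated enough header space for the push.
 */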

static void
xlog_cil_free_logvec(
	struct list_head	*lv_chain)
{
	struct xfs_log_vec	*lv;

	while (!list_empty(lv_chain)) {
		lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
		list_del_init(&lv->lv_list);
		kmem_free(lv);
	}
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly do
	 * not wait on log force completion because they may be holding locks
	 * required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_start_wait);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
	xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
			      xfs_has_discard(mp) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(&ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents.extent_list)) {
		ctx->busy_extents.mount = mp;
		ctx->busy_extents.owner = ctx;
		xfs_discard_extents(mp, &ctx->busy_extents);
		return;
	}

	kmem_free(ctx);
}

void
xlog_cil_process_committed(
	struct list_head	*list)
{
	struct xfs_cil_ctx	*ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx);
	}
}

/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
	struct xfs_cil_ctx	*ctx,
	struct xlog_in_core	*iclog)
{
	struct xfs_cil		*cil = ctx->cil;
	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);

	ASSERT(!ctx->commit_lsn);
	if (!ctx->start_lsn) {
		spin_lock(&cil->xc_push_lock);
		/*
		 * The LSN we need to pass to the log items on transaction
		 * commit is the LSN reported by the first log vector write, not
		 * the commit lsn. If we use the commit record lsn then we can
		 * move the grant write head beyond the tail LSN and overwrite
		 * it.
		 */
		ctx->start_lsn = lsn;
		wake_up_all(&cil->xc_start_wait);
		spin_unlock(&cil->xc_push_lock);

		/*
		 * Make sure the metadata we are about to overwrite in the log
		 * has been flushed to stable storage before this iclog is
		 * issued.
		 */
		spin_lock(&cil->xc_log->l_icloglock);
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
		spin_unlock(&cil->xc_log->l_icloglock);
		return;
	}

	/*
	 * Take a reference to the iclog for the context so that we still hold
	 * it when xlog_write is done and has released it. This means the
	 * context controls when the iclog is released for IO.
	 */
	atomic_inc(&iclog->ic_refcnt);

	/*
	 * xlog_state_get_iclog_space() guarantees there is enough space in the
	 * iclog for an entire commit record, so we can attach the context
	 * callbacks now. This needs to be done before we make the commit_lsn
	 * visible to waiters so that checkpoints with commit records in the
	 * same iclog order their IO completion callbacks in the same order that
	 * the commit records appear in the iclog.
	 */
	spin_lock(&cil->xc_log->l_icloglock);
	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
	spin_unlock(&cil->xc_log->l_icloglock);

	/*
	 * Now we can record the commit LSN and wake anyone waiting for this
	 * sequence to have the ordered commit record assigned to a physical
	 * location in the log.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_iclog = iclog;
	ctx->commit_lsn = lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
}

/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
	_START_RECORD,
	_COMMIT_RECORD,
};

static int
xlog_cil_order_write(
	struct xfs_cil		*cil,
	xfs_csn_t		sequence,
	enum _record_type	record)
{
	struct xfs_cil_ctx	*ctx;

restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(cil->xc_log)) {
			spin_unlock(&cil->xc_push_lock);
			return -EIO;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (ctx->sequence >= sequence)
			continue;

		/* Wait until the LSN for the record has been recorded. */
		switch (record) {
		case _START_RECORD:
			if (!ctx->start_lsn) {
				xlog_wait(&cil->xc_start_wait,
					  &cil->xc_push_lock);
				goto restart;
			}
			break;
		case _COMMIT_RECORD:
			if (!ctx->commit_lsn) {
				xlog_wait(&cil->xc_commit_wait,
					  &cil->xc_push_lock);
				goto restart;
			}
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
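
/*
 * Ordering example: if checkpoints 5, 6 and 7 are pushing concurrently,
 * checkpoint 7 sleeps in the loop above until both 5 and 6 have a
 * non-zero LSN recorded for the requested record type. Hence start
 * records always hit the log in sequence order, and the same holds
 * independently for commit records, which is what log recovery relies
 * on for in-order checkpoint replay.
 */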

/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
	struct xfs_cil_ctx	*ctx,
	uint32_t		chain_len)
{
	struct xlog		*log = ctx->cil->xc_log;
	int			error;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
	if (error)
		return error;
	return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
}

/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending
 * CIL sequence order so that log recovery will always replay the checkpoints
 * in the correct order.
 */
static int
xlog_cil_write_commit_record(
	struct xfs_cil_ctx	*ctx)
{
	struct xlog		*log = ctx->cil->xc_log;
	struct xlog_op_header	ophdr = {
		.oh_clientid	= XFS_TRANSACTION,
		.oh_tid		= cpu_to_be32(ctx->ticket->t_tid),
		.oh_flags	= XLOG_COMMIT_TRANS,
	};
	struct xfs_log_iovec	reg = {
		.i_addr = &ophdr,
		.i_len = sizeof(struct xlog_op_header),
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec	vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	int			error;
	LIST_HEAD(lv_chain);
	list_add(&vec.lv_list, &lv_chain);

	if (xlog_is_shutdown(log))
		return -EIO;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
	if (error)
		return error;

	/* account for space used by record data */
	ctx->ticket->t_curr_res -= reg.i_len;
	error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
	if (error)
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

struct xlog_cil_trans_hdr {
	struct xlog_op_header	oph[2];
	struct xfs_trans_header	thdr;
	struct xfs_log_iovec	lhdr[2];
};

/*
 * Build a checkpoint transaction header to begin the journal transaction. We
 * need to account for the space used by the transaction header here as it is
 * not accounted for in xlog_write().
 *
 * This is the only place we write a transaction header, so we also build the
 * log opheaders that indicate the start of a log transaction and wrap the
 * transaction header. We keep the start record in its own log vector rather
 * than compacting them into a single region as this ends up making the logic
 * in xlog_write() for handling empty opheaders for start, commit and unmount
 * records much simpler.
 */
static void
xlog_cil_build_trans_hdr(
	struct xfs_cil_ctx	*ctx,
	struct xlog_cil_trans_hdr *hdr,
	struct xfs_log_vec	*lvhdr,
	int			num_iovecs)
{
	struct xlog_ticket	*tic = ctx->ticket;
	__be32			tid = cpu_to_be32(tic->t_tid);

	memset(hdr, 0, sizeof(*hdr));

	/* Log start record */
	hdr->oph[0].oh_tid = tid;
	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
	hdr->oph[0].oh_flags = XLOG_START_TRANS;

	/* log iovec region pointer */
	hdr->lhdr[0].i_addr = &hdr->oph[0];
	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

	/* log opheader */
	hdr->oph[1].oh_tid = tid;
	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

	/* transaction header in host byte order format */
	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
	hdr->thdr.th_tid = tic->t_tid;
	hdr->thdr.th_num_items = num_iovecs;

	/* log iovec region pointer */
	hdr->lhdr[1].i_addr = &hdr->oph[1];
	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
			sizeof(struct xfs_trans_header);
	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

	lvhdr->lv_niovecs = 2;
	lvhdr->lv_iovecp = &hdr->lhdr[0];
	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;

	tic->t_curr_res -= lvhdr->lv_bytes;
}
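
/*
 * Resulting region layout of a checkpoint in the journal (sketch):
 *
 *	lhdr[0]: ophdr(XLOG_START_TRANS)	<- start record
 *	lhdr[1]: ophdr + xfs_trans_header	<- checkpoint trans header
 *	... formatted item regions from the sorted lv_chain ...
 *	ophdr(XLOG_COMMIT_TRANS)		<- written separately by
 *						   xlog_cil_write_commit_record()
 *
 * xlog_write() adds the per-region ophdrs for the item regions as they
 * are copied into the iclogs, as described in the comment above
 * xlog_cil_insert_format_items().
 */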

/*
 * CIL item reordering compare function. We want to order in ascending ID
 * order, but we want to leave items with the same ID in the order they were
 * added to the list. This is important for operations like reflink where we
 * log 4 order dependent intents in a single transaction when we overwrite an
 * existing shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
 * CUI(inc), BUI(remap)...
 */
static int
xlog_cil_order_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_vec	*l1 = container_of(a, struct xfs_log_vec, lv_list);
	struct xfs_log_vec	*l2 = container_of(b, struct xfs_log_vec, lv_list);

	return l1->lv_order_id > l2->lv_order_id;
}
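
/*
 * Note on the comparison above: returning the boolean result (0 or 1)
 * is sufficient for list_sort(), which only distinguishes "a sorts
 * after b" (> 0) from "keep order" (<= 0). list_sort() is also stable,
 * so vectors logged with the same lv_order_id, like the four reflink
 * intents above, retain their insertion order. For example, order IDs
 * [3, 1, 3', 2] sort to [1, 2, 3, 3'] with 3 still ahead of 3'.
 */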

/*
 * Pull all the log vectors off the items in the CIL, and remove the items from
 * the CIL. We don't need the CIL lock here because it's only needed on the
 * transaction commit side which is currently locked out by the flush lock.
 *
 * If a log item is marked with a whiteout, we do not need to write it to the
 * journal and so we just move them to the whiteout list for the caller to
 * dispose of appropriately.
 */
static void
xlog_cil_build_lv_chain(
	struct xfs_cil_ctx	*ctx,
	struct list_head	*whiteouts,
	uint32_t		*num_iovecs,
	uint32_t		*num_bytes)
{
	while (!list_empty(&ctx->log_items)) {
		struct xfs_log_item	*item;
		struct xfs_log_vec	*lv;

		item = list_first_entry(&ctx->log_items,
					struct xfs_log_item, li_cil);

		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
			list_move(&item->li_cil, whiteouts);
			trace_xfs_cil_whiteout_skip(item);
			continue;
		}

		lv = item->li_lv;
		lv->lv_order_id = item->li_order_id;

		/* we don't write ordered log vectors */
		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
			*num_bytes += lv->lv_bytes;
		*num_iovecs += lv->lv_niovecs;
		list_add_tail(&lv->lv_list, &ctx->lv_chain);

		list_del_init(&item->li_cil);
		item->li_order_id = 0;
		item->li_lv = NULL;
	}
}

static void
xlog_cil_cleanup_whiteouts(
	struct list_head	*whiteouts)
{
	while (!list_empty(whiteouts)) {
		struct xfs_log_item *item = list_first_entry(whiteouts,
						struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		trace_xfs_cil_whiteout_unpin(item);
		item->li_ops->iop_unpin(item, 1);
	}
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil		*cil = ctx->cil;
	struct xlog		*log = cil->xc_log;
	struct xfs_cil_ctx	*new_ctx;
	int			num_iovecs = 0;
	int			num_bytes = 0;
	int			error = 0;
	struct xlog_cil_trans_hdr thdr;
	struct xfs_log_vec	lvhdr = {};
	xfs_csn_t		push_seq;
	bool			push_commit_stable;
	LIST_HEAD(whiteouts);
	struct xlog_ticket	*ticket;

	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * As we are about to switch to a new, empty CIL context, we no longer
	 * need to throttle tasks on CIL space overruns. Wake any waiters that
	 * the hard push throttle may have caught so they can start committing
	 * to the new context. The ctx->xc_push_lock provides the serialisation
	 * necessary for safely using the lockless waitqueue_active() check in
	 * this context.
	 */
	if (waitqueue_active(&cil->xc_push_wait))
		wake_up_all(&cil->xc_push_wait);

	xlog_cil_push_pcp_aggregate(cil, ctx);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * wait on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);

	/*
	 * Switch the contexts so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Sort the log vector chain before we add the transaction headers.
	 * This ensures we always have the transaction headers at the start
	 * of the chain.
	 */
	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
	 * it gets written into the iclog first.
	 */
	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
	num_bytes += lvhdr.lv_bytes;
	list_add(&lvhdr.lv_list, &ctx->lv_chain);

	/*
	 * Take the lvhdr back off the lv_chain immediately after calling
	 * xlog_cil_write_chain() as it should not be passed to log IO
	 * completion.
	 */
	error = xlog_cil_write_chain(ctx, num_bytes);
	list_del(&lvhdr.lv_list);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * Grab the ticket from the ctx so we can ungrant it after releasing the
	 * commit_iclog. The ctx may be freed by the time we return from
	 * releasing the commit_iclog (i.e. checkpoint has been completed and
	 * callback run) so we can't reference the ctx after the call to
	 * xlog_state_release_iclog().
	 */
	ticket = ctx->ticket;

	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, then we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk. Hence we use the
	 * iclog header lsn and compare it to the commit lsn to determine if we
	 * need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
	 *
	 * If the push caller needs the commit to be immediately stable and the
	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
	 * will be written when released, switch its state to WANT_SYNC right
	 * now.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
1330 | xlog_state_release_iclog(log, iclog: ctx->commit_iclog, ticket); |
1331 | |
1332 | /* Not safe to reference ctx now! */ |
1333 | |
1334 | spin_unlock(lock: &log->l_icloglock); |
1335 | xlog_cil_cleanup_whiteouts(whiteouts: &whiteouts); |
1336 | xfs_log_ticket_ungrant(log, ticket); |
1337 | return; |
1338 | |
1339 | out_skip: |
1340 | up_write(sem: &cil->xc_ctx_lock); |
1341 | xfs_log_ticket_put(ticket: new_ctx->ticket); |
1342 | kmem_free(ptr: new_ctx); |
1343 | return; |
1344 | |
1345 | out_abort_free_ticket: |
1346 | ASSERT(xlog_is_shutdown(log)); |
1347 | xlog_cil_cleanup_whiteouts(whiteouts: &whiteouts); |
1348 | if (!ctx->commit_iclog) { |
1349 | xfs_log_ticket_ungrant(log, ticket: ctx->ticket); |
1350 | xlog_cil_committed(ctx); |
1351 | return; |
1352 | } |
1353 | spin_lock(lock: &log->l_icloglock); |
1354 | ticket = ctx->ticket; |
1355 | xlog_state_release_iclog(log, iclog: ctx->commit_iclog, ticket); |
1356 | /* Not safe to reference ctx now! */ |
1357 | spin_unlock(lock: &log->l_icloglock); |
1358 | xfs_log_ticket_ungrant(log, ticket); |
1359 | } |
1360 | |
1361 | /* |
1362 | * We need to push CIL every so often so we don't cache more than we can fit in |
1363 | * the log. The limit really is that a checkpoint can't be more than half the |
1364 | * log (the current checkpoint is not allowed to overwrite the previous |
1365 | * checkpoint), but commit latency and memory usage limit this to a smaller |
1366 | * size. |
1367 | */ |
1368 | static void |
1369 | xlog_cil_push_background( |
1370 | struct xlog *log) __releases(cil->xc_ctx_lock) |
1371 | { |
1372 | struct xfs_cil *cil = log->l_cilp; |
1373 | int space_used = atomic_read(v: &cil->xc_ctx->space_used); |
1374 | |
1375 | /* |
1376 | * The cil won't be empty because we are called while holding the |
1377 | * context lock so whatever we added to the CIL will still be there. |
1378 | */ |
1379 | ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)); |
1380 | |
1381 | /* |
1382 | * We are done if: |
1383 | * - we haven't used up all the space available yet; or |
1384 | * - we've already queued up a push; and |
1385 | * - we're not over the hard limit; and |
1386 | * - nothing has been over the hard limit. |
1387 | * |
1388 | * If so, we don't need to take the push lock as there's nothing to do. |
1389 | */ |
1390 | if (space_used < XLOG_CIL_SPACE_LIMIT(log) || |
1391 | (cil->xc_push_seq == cil->xc_current_sequence && |
1392 | space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) && |
1393 | !waitqueue_active(wq_head: &cil->xc_push_wait))) { |
1394 | up_read(sem: &cil->xc_ctx_lock); |
1395 | return; |
1396 | } |
1397 | |
1398 | spin_lock(lock: &cil->xc_push_lock); |
1399 | if (cil->xc_push_seq < cil->xc_current_sequence) { |
1400 | cil->xc_push_seq = cil->xc_current_sequence; |
1401 | queue_work(wq: cil->xc_push_wq, work: &cil->xc_ctx->push_work); |
1402 | } |
1403 | |
1404 | /* |
1405 | * Drop the context lock now, we can't hold that if we need to sleep |
1406 | * because we are over the blocking threshold. The push_lock is still |
1407 | * held, so blocking threshold sleep/wakeup is still correctly |
1408 | * serialised here. |
1409 | */ |
1410 | up_read(sem: &cil->xc_ctx_lock); |
1411 | |
1412 | /* |
1413 | * If we are well over the space limit, throttle the work that is being |
1414 | * done until the push work on this context has begun. Enforce the hard |
1415 | * throttle on all transaction commits once it has been activated, even |
1416 | * if the committing transactions have resulted in the space usage |
1417 | * dipping back down under the hard limit. |
1418 | * |
1419 | * The ctx->xc_push_lock provides the serialisation necessary for safely |
1420 | * calling xlog_cil_over_hard_limit() in this context. |
1421 | */ |
1422 | if (xlog_cil_over_hard_limit(log, space_used)) { |
1423 | trace_xfs_log_cil_wait(log, tic: cil->xc_ctx->ticket); |
1424 | ASSERT(space_used < log->l_logsize); |
1425 | xlog_wait(wq: &cil->xc_push_wait, lock: &cil->xc_push_lock); |
1426 | return; |
1427 | } |
1428 | |
1429 | spin_unlock(lock: &cil->xc_push_lock); |
1430 | |
1431 | } |

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving so as to minimise the time the caller
 * spends waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
	struct xlog *log,
	xfs_lsn_t push_seq,
	bool async)
{
	struct xfs_cil *cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	spin_lock(&cil->xc_push_lock);

	/*
	 * If this is an async flush request, we always need to set the
	 * xc_push_commit_stable flag even if something else has already queued
	 * a push. The flush caller is asking for the CIL to be on stable
	 * storage when the next push completes, so regardless of who has
	 * queued the push, the flush requires stable semantics from it.
	 */
	cil->xc_push_commit_stable = async;

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no more work that we need to do.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
	    push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}
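
/*
 * Both modes are exercised later in this file: xlog_cil_flush() issues an
 * async push (async == true) so the push work flushes the commit record
 * itself, while xlog_cil_force_seq() issues a synchronous push
 * (async == false) and then waits on the committing list for the checkpoint
 * to complete.
 */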

bool
xlog_cil_empty(
	struct xlog *log)
{
	struct xfs_cil *cil = log->l_cilp;
	bool empty = false;

	spin_lock(&cil->xc_push_lock);
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * If there are intent done items in this transaction and the related intent
 * was committed in the current (same) CIL checkpoint, we don't need to write
 * either the intent or intent done item to the journal as the change will be
 * journalled atomically within this checkpoint. As we cannot remove items from
 * the CIL here, mark the related intent with a whiteout so that the CIL push
 * can remove it rather than writing it to the journal. Then remove the intent
 * done item from the current transaction and release it so it doesn't get put
 * into the CIL at all.
 */
static uint32_t
xlog_cil_process_intents(
	struct xfs_cil *cil,
	struct xfs_trans *tp)
{
	struct xfs_log_item *lip, *ilip, *next;
	uint32_t len = 0;

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
			continue;

		ilip = lip->li_ops->iop_intent(lip);
		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
			continue;
		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
		trace_xfs_cil_whiteout_mark(ilip);
		len += ilip->li_lv->lv_bytes;
		kmem_free(ilip->li_lv);
		ilip->li_lv = NULL;

		xfs_trans_del_item(lip);
		lip->li_ops->iop_release(lip);
	}
	return len;
}
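
/*
 * An illustrative example, using one intent type (the same applies to any
 * item pair flagged XFS_ITEM_INTENT_DONE): if an EFD (extent free done)
 * commits while its EFI (extent free intent) is still in the current
 * checkpoint sequence, neither item needs to reach the journal:
 *
 *	EFI committed in checkpoint sequence N (ilip->li_seq == N)
 *	EFD commits while the CIL sequence is still N
 *		=> EFI is marked XFS_LI_WHITEOUT and dropped at push time,
 *		   EFD is released here and never enters the CIL
 *
 * Log recovery therefore never sees the intent and never replays it.
 */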

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
	struct xlog *log,
	struct xfs_trans *tp,
	xfs_csn_t *commit_seq,
	bool regrant)
{
	struct xfs_cil *cil = log->l_cilp;
	struct xfs_log_item *lip, *next;
	uint32_t released_space = 0;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
		released_space = xlog_cil_process_intents(cil, tp);

	xlog_cil_insert_items(log, tp, released_space);

	if (regrant && !xlog_is_shutdown(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}
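
/*
 * For context, a typical caller path (a sketch; the actual chain lives in
 * xfs_trans.c and may differ between kernel versions):
 *
 *	xfs_trans_commit(tp)
 *	  __xfs_trans_commit(tp, regrant)
 *	    xlog_cil_commit(log, tp, &commit_seq, regrant)
 *
 * The commit_seq returned here is what callers later pass to
 * xfs_log_force_seq() when they need the transaction on stable storage.
 */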

/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the
 * disk, but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
	struct xlog *log)
{
	xfs_csn_t seq = log->l_cilp->xc_current_sequence;

	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
	xlog_cil_push_now(log, seq, true);

	/*
	 * If the CIL is empty, make sure that any previous checkpoint that may
	 * still be in an active iclog is pushed to stable storage.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
		xfs_log_force(log->l_mp, 0);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number given.
 * Hence the only time we will trigger a push here is if the push sequence is
 * the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog *log,
	xfs_csn_t sequence)
{
	struct xfs_cil *cil = log->l_cilp;
	struct xfs_cil_ctx *ctx;
	xfs_lsn_t commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by
		 * the shutdown, but then went back to sleep once already in
		 * the shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and hence nothing to wait for. If the CIL is not empty, it means we
	 * haven't yet started the push, because if it had started we would
	 * have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * the LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
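
/*
 * Caller-side handling of the return value, as a sketch modelled on
 * xfs_log_force_seq() in xfs_log.c (names and signature may differ between
 * kernel versions):
 *
 *	lsn = xlog_cil_force_seq(log, seq);
 *	if (lsn == NULLCOMMITLSN)
 *		return 0;
 *	return xlog_force_lsn(log, lsn, flags, log_flushed, false);
 *
 * The zero LSN returned in the shutdown case is not NULLCOMMITLSN, so it
 * falls through to the iclog force and runs its error handling as intended.
 */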

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog *log)
{
	struct xfs_cil *cil;
	struct xfs_cil_ctx *ctx;
	struct xlog_cil_pcp *cilpcp;
	int cpu;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;
	/*
	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
	 * concurrency the log spinlocks will be exposed to.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	cil->xc_log = log;
	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
	if (!cil->xc_pcp)
		goto out_destroy_wq;

	for_each_possible_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		INIT_LIST_HEAD(&cilpcp->busy_extents);
		INIT_LIST_HEAD(&cilpcp->log_items);
	}

	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);
	return 0;

out_destroy_wq:
	destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
	kmem_free(cil);
	return -ENOMEM;
}
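
/*
 * A usage note (an assumption based on the wider codebase, not this file):
 * xlog_cil_init() is called once per log from the log allocation path at
 * mount time, and is paired with xlog_cil_destroy() below on unmount or on
 * mount failure.
 */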

void
xlog_cil_destroy(
	struct xlog *log)
{
	struct xfs_cil *cil = log->l_cilp;

	if (cil->xc_ctx) {
		if (cil->xc_ctx->ticket)
			xfs_log_ticket_put(cil->xc_ctx->ticket);
		kmem_free(cil->xc_ctx);
	}

	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
	free_percpu(cil->xc_pcp);
	destroy_workqueue(cil->xc_push_wq);
	kmem_free(cil);
}