// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"


/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They
 * need to abort if the lockspace is stopped, indicating that a node has
 * failed (perhaps the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timeout.  This uses a timeout so it can check periodically if the wait
 * should abort due to node failure (which doesn't cause a wake_up).
 * This should only be called by the dlm_recoverd thread.
 */

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;
	int rv;

	while (1) {
		rv = wait_event_timeout(ls->ls_wait_general,
					testfn(ls) || dlm_recovery_stopped(ls),
					dlm_config.ci_recover_timer * HZ);
		if (rv)
			break;
		if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) {
			log_debug(ls, "dlm_wait_function timed out");
			return -ETIMEDOUT;
		}
	}

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}
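
/*
 * A minimal usage sketch (both callers appear later in this file): pass a
 * testfn such as &recover_idr_empty or &recover_list_empty, e.g.
 *
 *	error = dlm_wait_function(ls, &recover_idr_empty);
 *
 * The rcom reply handlers update the polled condition and wake
 * ls_wait_general when it may have become true.
 */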

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */
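
/*
 * Illustrative sequence, assuming nodes 1, 2 and 3 with node 1 as the low
 * nodeid: nodes 2 and 3 each set status X locally; node 1 polls nodes 2 and
 * 3 (wait_status_all) until both report X, then sets X_ALL on itself; nodes
 * 2 and 3 poll node 1 (wait_status_low) until they see X_ALL.
 */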

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;
	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	_set_recover_status(ls, status);
	spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
			   int save_slots, uint64_t seq)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid, 0, seq);
			if (error)
				goto out;

			if (save_slots)
				dlm_slot_save(ls, rc, memb);

			if (le32_to_cpu(rc->rc_result) & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
			   uint32_t status_flags, uint64_t seq)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid, status_flags, seq);
		if (error)
			break;

		if (le32_to_cpu(rc->rc_result) & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}

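/*
 * Note: "status << 1" below relies on each DLM_RS_X_ALL flag being defined
 * as the bit directly above DLM_RS_X (e.g. DLM_RS_DIR 0x4 and
 * DLM_RS_DIR_ALL 0x8 in dlm_internal.h), so shifting a status flag left by
 * one yields its corresponding _ALL flag.
 */
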
static int wait_status(struct dlm_ls *ls, uint32_t status, uint64_t seq)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status, 0, seq);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all, 0, seq);

	return error;
}

int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int num_slots, slots_size;
	int error, rv;
	uint32_t gen;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memb->slot = -1;
		memb->generation = 0;
	}

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, DLM_RS_NODES, 1, seq);
		if (error)
			goto out;

		/* slots array is sparse, slots_size may be > num_slots */

		rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
		if (!rv) {
			spin_lock(&ls->ls_recover_lock);
			_set_recover_status(ls, DLM_RS_NODES_ALL);
			ls->ls_num_slots = num_slots;
			ls->ls_slots_size = slots_size;
			ls->ls_slots = slots;
			ls->ls_generation = gen;
			spin_unlock(&ls->ls_recover_lock);
		} else {
			dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
		}
	} else {
		error = wait_status_low(ls, DLM_RS_NODES_ALL,
					DLM_RSF_NEED_SLOTS, seq);
		if (error)
			goto out;

		dlm_slots_copy_in(ls);
	}
 out:
	return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq)
{
	return wait_status(ls, DLM_RS_DIR, seq);
}

int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq)
{
	return wait_status(ls, DLM_RS_LOCKS, seq);
}

int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq)
{
	return wait_status(ls, DLM_RS_DONE, seq);
}
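
/*
 * The wait_status() wrappers above are called in order by the recovery
 * sequence (ls_recover() in dlm_recoverd.c), acting as cluster-wide
 * barriers between the directory rebuild, lock rebuild, and final stages.
 */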

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		r->res_recover_locks_count = 0;
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static int recover_idr_empty(struct dlm_ls *ls)
{
	int empty = 1;

	spin_lock(&ls->ls_recover_idr_lock);
	if (ls->ls_recover_list_count)
		empty = 0;
	spin_unlock(&ls->ls_recover_idr_lock);

	return empty;
}

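/*
 * idr_preload() disables preemption and preallocates backing memory, so the
 * idr_alloc() below can use GFP_NOWAIT while ls_recover_idr_lock is held and
 * still normally succeed.
 */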
static int recover_idr_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int rv;

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_recover_idr_lock);
	if (r->res_id) {
		rv = -1;
		goto out_unlock;
	}
	rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
	if (rv < 0)
		goto out_unlock;

	r->res_id = rv;
	ls->ls_recover_list_count++;
	dlm_hold_rsb(r);
	rv = 0;
out_unlock:
	spin_unlock(&ls->ls_recover_idr_lock);
	idr_preload_end();
	return rv;
}

static void recover_idr_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_idr_lock);
	idr_remove(&ls->ls_recover_idr, r->res_id);
	r->res_id = 0;
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_idr_lock);

	dlm_put_rsb(r);
}

static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r;

	spin_lock(&ls->ls_recover_idr_lock);
	r = idr_find(&ls->ls_recover_idr, (int)id);
	spin_unlock(&ls->ls_recover_idr_lock);
	return r;
}

static void recover_idr_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int id;

	spin_lock(&ls->ls_recover_idr_lock);

	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
		idr_remove(&ls->ls_recover_idr, id);
		r->res_id = 0;
		r->res_recover_locks_count = 0;
		ls->ls_recover_list_count--;

		dlm_put_rsb(r);
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_idr_lock);
}


/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue.
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already, which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue) {
		if (!test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) {
			lkb->lkb_nodeid = nodeid;
			lkb->lkb_remid = 0;
		}
	}
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks.
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r)
{
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 *
 * Another node recovering the master may have sent us a rcom lookup,
 * and our dlm_master_lookup() set it as the new master, along with
 * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
 * equals our_nodeid below).
 */

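/*
 * In short: if the rsb's directory node is this node, the new master is
 * already known (ourself when the old master was removed, or whatever
 * dlm_master_lookup() assigned), so only the lkbs need updating; otherwise
 * the new master must be fetched asynchronously from the directory node
 * via dlm_send_rcom_lookup().
 */
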
static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq)
{
	struct dlm_ls *ls = r->res_ls;
	int our_nodeid, dir_nodeid;
	int is_removed = 0;
	int error;

	if (is_master(r))
		return 0;

	is_removed = dlm_is_removed(ls, r->res_nodeid);

	if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
		return 0;

	our_nodeid = dlm_our_nodeid();
	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		if (is_removed) {
			r->res_master_nodeid = our_nodeid;
			r->res_nodeid = 0;
		}

		/* set master of lkbs to ourself when is_removed, or to
		   another new master which we set along with NEW_MASTER
		   in dlm_master_lookup */
		set_new_master(r);
		error = 0;
	} else {
		recover_idr_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid, seq);
	}

	(*count)++;
	return error;
}

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
 * This is necessary because recovery can be started, aborted and restarted,
 * causing the master nodeid to briefly change during the aborted recovery, and
 * change back to the original value in the second recovery.  The MSTCPY locks
 * may or may not have been purged during the aborted recovery.  Another node
 * with an outstanding request in the waiters list and a request reply saved
 * in the requestqueue cannot know whether it should ignore the reply and
 * resend the request, or accept the reply and complete the request.  It must
 * do the former if the remote node purged MSTCPY locks, and it must do the
 * latter if the remote node did not.  This is solved by always purging MSTCPY
 * locks, in which case the request reply would always be ignored and the
 * request resent.
 */

static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
{
	int dir_nodeid = dlm_dir_nodeid(r);
	int new_master = dir_nodeid;

	if (dir_nodeid == dlm_our_nodeid())
		new_master = 0;

	dlm_purge_mstcpy_locks(r);
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	(*count)++;
	return 0;
}

/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq)
{
	struct dlm_rsb *r;
	unsigned int total = 0;
	unsigned int count = 0;
	int nodir = dlm_no_directory(ls);
	int error;

	log_rinfo(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		lock_rsb(r);
		if (nodir)
			error = recover_master_static(r, &count);
		else
			error = recover_master(r, &count, seq);
		unlock_rsb(r);
		cond_resched();
		total++;

		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);

	error = dlm_wait_function(ls, &recover_idr_empty);
 out:
	if (error)
		recover_idr_clear(ls);
	return error;
}

int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int ret_nodeid, new_master;

	r = recover_idr_find(ls, le64_to_cpu(rc->rc_id));
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)le64_to_cpu(rc->rc_id));
		goto out;
	}

	ret_nodeid = le32_to_cpu(rc->rc_result);

	if (ret_nodeid == dlm_our_nodeid())
		new_master = 0;
	else
		new_master = ret_nodeid;

	lock_rsb(r);
	r->res_master_nodeid = ret_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	unlock_rsb(r);
	recover_idr_del(r);

	if (recover_idr_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}


/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/


/*
 * keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies, recovery for the rsb is done
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head,
			       uint64_t seq)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb, seq);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}

static int recover_locks(struct dlm_rsb *r, uint64_t seq)
{
	int error = 0;

	lock_rsb(r);

	DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue, seq);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue, seq);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue, seq);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq)
{
	struct dlm_rsb *r;
	int error, count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r, seq);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_locks %d out", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set in two cases:
 *
 * 1. we are master, but not new, and we purged an EX/PW lock held by a
 * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
 *
 * 2. we are a new master, and there are only NL/CR locks left.
 * (We could probably improve this by only invalidating in this way when
 * the previous master left uncleanly.  VMS docs mention that.)
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */
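
/*
 * Example: if a granted PW lock using DLM_LKF_VALBLK survives on the rsb,
 * its lvb is copied to the rsb; if only NL/CR VALBLK locks remain, the rsb
 * takes the lvb from the lock with the highest lvbseq but is also flagged
 * VALNOTVALID.
 */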

static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
	    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
		/* case 1 above */
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!rsb_flag(r, RSB_NEW_MASTER2))
		return;

	/* we are the new master, so figure out if VALNOTVALID should
	   be set, and set the rsb lvb from the best lkb available. */

	list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
		if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (iter->lkb_grmode > DLM_LOCK_CR) {
			big_lkb = iter;
			goto setflag;
		}

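		/* signed difference makes the lvbseq comparison safe
		   across u32 wraparound (serial number arithmetic) */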
		if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = iter;
			high_seq = iter->lkb_lvbseq;
		}
	}

	list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
		if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (iter->lkb_grmode > DLM_LOCK_CR) {
			big_lkb = iter;
			goto setflag;
		}

		if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = iter;
			high_seq = iter->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	/* lvb is invalidated if only NL/CR locks remain */
	if (!big_lkb)
		rsb_set_flag(r, RSB_VALNOTVALID);

	if (!r->res_lvbptr) {
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lkb) {
		r->res_lvbseq = big_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	struct dlm_lkb *lkb;
	int grmode = -1;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_grmode == DLM_LOCK_PR ||
		    lkb->lkb_grmode == DLM_LOCK_CW) {
			grmode = lkb->lkb_grmode;
			break;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (lkb->lkb_grmode != DLM_LOCK_IV)
			continue;
		if (grmode == -1) {
			log_debug(ls, "recover_conversion %x set gr to rq %d",
				  lkb->lkb_id, lkb->lkb_rqmode);
			lkb->lkb_grmode = lkb->lkb_rqmode;
		} else {
			log_debug(ls, "recover_conversion %x set gr %d",
				  lkb->lkb_id, grmode);
			lkb->lkb_grmode = grmode;
		}
	}
}

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_recover_grant() due to locks that may have
   existed from a removed node. */

static void recover_grant(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_RECOVER_GRANT);
}

void dlm_recover_rsbs(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		lock_rsb(r);
		if (is_master(r)) {
			if (rsb_flag(r, RSB_RECOVER_CONVERT))
				recover_conversion(r);

			/* recover lvb before granting locks so the updated
			   lvb/VALNOTVALID is presented in the completion */
			recover_lvb(r);

			if (rsb_flag(r, RSB_NEW_MASTER2))
				recover_grant(r);
			count++;
		} else {
			rsb_clear_flag(r, RSB_VALNOTVALID);
		}
		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
		rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
		rsb_clear_flag(r, RSB_NEW_MASTER2);
		unlock_rsb(r);
	}
	up_read(&ls->ls_root_sem);

	if (count)
		log_rinfo(ls, "dlm_recover_rsbs %d done", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i, error = 0;

	down_write(&ls->ls_root_sem);
	if (!list_empty(&ls->ls_root_list)) {
		log_error(ls, "root list not empty");
		error = -EINVAL;
		goto out;
	}

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			list_add(&r->res_root_list, &ls->ls_root_list);
			dlm_hold_rsb(r);
		}

		if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
			log_error(ls, "dlm_create_root_list toss not empty");
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
 out:
	up_write(&ls->ls_root_sem);
	return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;

	down_write(&ls->ls_root_sem);
	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
		list_del_init(&r->res_root_list);
		dlm_put_rsb(r);
	}
	up_write(&ls->ls_root_sem);
}

void dlm_clear_toss(struct dlm_ls *ls)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	unsigned int count = 0;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
			next = rb_next(n);
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			rb_erase(n, &ls->ls_rsbtbl[i].toss);
			dlm_free_rsb(r);
			count++;
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}

	if (count)
		log_rinfo(ls, "dlm_clear_toss %u done", count);
}