1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | drbd_int.h |
4 | |
5 | This file is part of DRBD by Philipp Reisner and Lars Ellenberg. |
6 | |
7 | Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. |
8 | Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. |
9 | Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. |
10 | |
11 | |
12 | */ |
13 | |
14 | #ifndef _DRBD_INT_H |
15 | #define _DRBD_INT_H |
16 | |
17 | #include <crypto/hash.h> |
18 | #include <linux/compiler.h> |
19 | #include <linux/types.h> |
20 | #include <linux/list.h> |
21 | #include <linux/sched/signal.h> |
22 | #include <linux/bitops.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/ratelimit.h> |
25 | #include <linux/tcp.h> |
26 | #include <linux/mutex.h> |
27 | #include <linux/major.h> |
28 | #include <linux/blkdev.h> |
29 | #include <linux/backing-dev.h> |
30 | #include <linux/idr.h> |
31 | #include <linux/dynamic_debug.h> |
32 | #include <net/tcp.h> |
33 | #include <linux/lru_cache.h> |
34 | #include <linux/prefetch.h> |
35 | #include <linux/drbd_genl_api.h> |
36 | #include <linux/drbd.h> |
37 | #include <linux/drbd_config.h> |
38 | #include "drbd_strings.h" |
39 | #include "drbd_state.h" |
40 | #include "drbd_protocol.h" |
41 | #include "drbd_polymorph_printk.h" |
42 | |
43 | /* shared module parameters, defined in drbd_main.c */ |
44 | #ifdef CONFIG_DRBD_FAULT_INJECTION |
45 | extern int drbd_enable_faults; |
46 | extern int drbd_fault_rate; |
47 | #endif |
48 | |
49 | extern unsigned int drbd_minor_count; |
50 | extern char drbd_usermode_helper[]; |
51 | extern int drbd_proc_details; |
52 | |
53 | |
54 | /* This is used to stop/restart our threads. |
 * Cannot use SIGTERM or SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I choose SIGHUP for now.
58 | */ |
59 | #define DRBD_SIGKILL SIGHUP |
60 | |
61 | #define ID_IN_SYNC (4711ULL) |
62 | #define ID_OUT_OF_SYNC (4712ULL) |
63 | #define ID_SYNCER (-1ULL) |
64 | |
65 | #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL) |
66 | |
67 | struct drbd_device; |
68 | struct drbd_connection; |
69 | struct drbd_peer_device; |
70 | |
71 | /* Defines to control fault insertion */ |
72 | enum { |
73 | DRBD_FAULT_MD_WR = 0, /* meta data write */ |
74 | DRBD_FAULT_MD_RD = 1, /* read */ |
75 | DRBD_FAULT_RS_WR = 2, /* resync */ |
76 | DRBD_FAULT_RS_RD = 3, |
77 | DRBD_FAULT_DT_WR = 4, /* data */ |
78 | DRBD_FAULT_DT_RD = 5, |
79 | DRBD_FAULT_DT_RA = 6, /* data read ahead */ |
80 | DRBD_FAULT_BM_ALLOC = 7, /* bitmap allocation */ |
81 | DRBD_FAULT_AL_EE = 8, /* alloc ee */ |
82 | DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */ |
83 | |
84 | DRBD_FAULT_MAX, |
85 | }; |
86 | |
87 | extern unsigned int |
88 | _drbd_insert_fault(struct drbd_device *device, unsigned int type); |
89 | |
90 | static inline int |
91 | drbd_insert_fault(struct drbd_device *device, unsigned int type) { |
92 | #ifdef CONFIG_DRBD_FAULT_INJECTION |
93 | return drbd_fault_rate && |
94 | (drbd_enable_faults & (1<<type)) && |
95 | _drbd_insert_fault(device, type); |
96 | #else |
97 | return 0; |
98 | #endif |
99 | } |
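/* Illustration only (a sketch, not part of this header's interface):
 * a call site typically wraps an I/O submission roughly like
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_MD_WR))
 *		bio_io_error(bio);
 *	else
 *		submit_bio(bio);
 *
 * where "bio" stands for whatever request the caller is about to issue;
 * with fault injection compiled out this collapses to the else branch.
 */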
100 | |
101 | /* integer division, round _UP_ to the next integer */ |
102 | #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0)) |
103 | /* usual integer division */ |
104 | #define div_floor(A, B) ((A)/(B)) |
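/* For example, with positive operands: div_ceil(7, 2) == 4 while
 * div_floor(7, 2) == 3. Note div_ceil is only correct for non-negative
 * values, since C99 integer division truncates toward zero. */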
105 | |
106 | extern struct ratelimit_state drbd_ratelimit_state; |
107 | extern struct idr drbd_devices; /* RCU, updates: genl_lock() */ |
108 | extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */ |
109 | |
110 | extern const char *cmdname(enum drbd_packet cmd); |
111 | |
112 | /* for sending/receiving the bitmap, |
113 | * possibly in some encoding scheme */ |
114 | struct bm_xfer_ctx { |
115 | /* "const" |
116 | * stores total bits and long words |
117 | * of the bitmap, so we don't need to |
118 | * call the accessor functions over and again. */ |
119 | unsigned long bm_bits; |
120 | unsigned long bm_words; |
121 | /* during xfer, current position within the bitmap */ |
122 | unsigned long bit_offset; |
123 | unsigned long word_offset; |
124 | |
125 | /* statistics; index: (h->command == P_BITMAP) */ |
126 | unsigned packets[2]; |
127 | unsigned bytes[2]; |
128 | }; |
129 | |
130 | extern void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device, |
131 | const char *direction, struct bm_xfer_ctx *c); |
132 | |
133 | static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c) |
134 | { |
135 | /* word_offset counts "native long words" (32 or 64 bit), |
136 | * aligned at 64 bit. |
137 | * Encoded packet may end at an unaligned bit offset. |
138 | * In case a fallback clear text packet is transmitted in |
139 | * between, we adjust this offset back to the last 64bit |
140 | * aligned "native long word", which makes coding and decoding |
141 | * the plain text bitmap much more convenient. */ |
142 | #if BITS_PER_LONG == 64 |
143 | c->word_offset = c->bit_offset >> 6; |
144 | #elif BITS_PER_LONG == 32 |
145 | c->word_offset = c->bit_offset >> 5; |
146 | c->word_offset &= ~(1UL); |
147 | #else |
148 | # error "unsupported BITS_PER_LONG" |
149 | #endif |
150 | } |
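/* Worked example: for c->bit_offset == 100, a 64 bit host computes
 * word_offset = 100 >> 6 == 1 (the long word starting at bit 64);
 * a 32 bit host computes 100 >> 5 == 3 and rounds down to the even
 * word 2, which also starts at bit 64, the same 64 bit aligned spot. */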
151 | |
extern unsigned int drbd_header_size(struct drbd_connection *connection);
153 | |
154 | /**********************************************************************/ |
155 | enum drbd_thread_state { |
156 | NONE, |
157 | RUNNING, |
158 | EXITING, |
159 | RESTARTING |
160 | }; |
161 | |
162 | struct drbd_thread { |
163 | spinlock_t t_lock; |
164 | struct task_struct *task; |
165 | struct completion stop; |
166 | enum drbd_thread_state t_state; |
167 | int (*function) (struct drbd_thread *); |
168 | struct drbd_resource *resource; |
169 | struct drbd_connection *connection; |
170 | int reset_cpu_mask; |
171 | const char *name; |
172 | }; |
173 | |
174 | static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi) |
175 | { |
	/* THINK testing the t_state seems to be harmless in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
178 | * --lge */ |
179 | |
180 | smp_rmb(); |
181 | return thi->t_state; |
182 | } |
183 | |
184 | struct drbd_work { |
185 | struct list_head list; |
186 | int (*cb)(struct drbd_work *, int cancel); |
187 | }; |
188 | |
189 | struct drbd_device_work { |
190 | struct drbd_work w; |
191 | struct drbd_device *device; |
192 | }; |
193 | |
194 | #include "drbd_interval.h" |
195 | |
196 | extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *); |
197 | |
198 | extern void lock_all_resources(void); |
199 | extern void unlock_all_resources(void); |
200 | |
201 | struct drbd_request { |
202 | struct drbd_work w; |
203 | struct drbd_device *device; |
204 | |
	/* if local IO is not allowed, this will be NULL.
206 | * if local IO _is_ allowed, holds the locally submitted bio clone, |
207 | * or, after local IO completion, the ERR_PTR(error). |
208 | * see drbd_request_endio(). */ |
209 | struct bio *private_bio; |
210 | |
211 | struct drbd_interval i; |
212 | |
213 | /* epoch: used to check on "completion" whether this req was in |
214 | * the current epoch, and we therefore have to close it, |
	 * causing a p_barrier packet to be sent, starting a new epoch.
216 | * |
217 | * This corresponds to "barrier" in struct p_barrier[_ack], |
218 | * and to "barrier_nr" in struct drbd_epoch (and various |
219 | * comments/function parameters/local variable names). |
220 | */ |
221 | unsigned int epoch; |
222 | |
223 | struct list_head tl_requests; /* ring list in the transfer log */ |
224 | struct bio *master_bio; /* master bio pointer */ |
225 | |
226 | /* see struct drbd_device */ |
227 | struct list_head req_pending_master_completion; |
228 | struct list_head req_pending_local; |
229 | |
230 | /* for generic IO accounting */ |
231 | unsigned long start_jif; |
232 | |
233 | /* for DRBD internal statistics */ |
234 | |
235 | /* Minimal set of time stamps to determine if we wait for activity log |
236 | * transactions, local disk or peer. 32 bit "jiffies" are good enough, |
	 * we don't expect a DRBD request to be stalled for several months.
238 | */ |
239 | |
240 | /* before actual request processing */ |
241 | unsigned long in_actlog_jif; |
242 | |
243 | /* local disk */ |
244 | unsigned long pre_submit_jif; |
245 | |
246 | /* per connection */ |
247 | unsigned long pre_send_jif; |
248 | unsigned long acked_jif; |
249 | unsigned long net_done_jif; |
250 | |
251 | /* Possibly even more detail to track each phase: |
252 | * master_completion_jif |
253 | * how long did it take to complete the master bio |
254 | * (application visible latency) |
255 | * allocated_jif |
256 | * how long the master bio was blocked until we finally allocated |
257 | * a tracking struct |
258 | * in_actlog_jif |
259 | * how long did we wait for activity log transactions |
260 | * |
261 | * net_queued_jif |
262 | * when did we finally queue it for sending |
263 | * pre_send_jif |
264 | * when did we start sending it |
265 | * post_send_jif |
266 | * how long did we block in the network stack trying to send it |
267 | * acked_jif |
268 | * when did we receive (or fake, in protocol A) a remote ACK |
269 | * net_done_jif |
270 | * when did we receive final acknowledgement (P_BARRIER_ACK), |
	 *  or decide, e.g. on connection loss, that we no longer expect
272 | * anything from this peer for this request. |
273 | * |
274 | * pre_submit_jif |
275 | * post_sub_jif |
	 *  when did we start submitting to the lower level device,
277 | * and how long did we block in that submit function |
278 | * local_completion_jif |
279 | * how long did it take the lower level device to complete this request |
280 | */ |
281 | |
282 | |
283 | /* once it hits 0, we may complete the master_bio */ |
284 | atomic_t completion_ref; |
285 | /* once it hits 0, we may destroy this drbd_request object */ |
286 | struct kref kref; |
287 | |
288 | unsigned rq_state; /* see comments above _req_mod() */ |
289 | }; |
290 | |
291 | struct drbd_epoch { |
292 | struct drbd_connection *connection; |
293 | struct list_head list; |
294 | unsigned int barrier_nr; |
295 | atomic_t epoch_size; /* increased on every request added. */ |
	atomic_t active;	/* increased on every request added, decreased on every request finished. */
297 | unsigned long flags; |
298 | }; |
299 | |
300 | /* Prototype declaration of function defined in drbd_receiver.c */ |
/* Prototype declarations of functions defined in drbd_receiver.c */
302 | int drbd_asender(struct drbd_thread *); |
303 | |
304 | /* drbd_epoch flag bits */ |
305 | enum { |
306 | DE_HAVE_BARRIER_NUMBER, |
307 | }; |
308 | |
309 | enum epoch_event { |
310 | EV_PUT, |
311 | EV_GOT_BARRIER_NR, |
312 | EV_BECAME_LAST, |
313 | EV_CLEANUP = 32, /* used as flag */ |
314 | }; |
315 | |
316 | struct digest_info { |
317 | int digest_size; |
318 | void *digest; |
319 | }; |
320 | |
321 | struct drbd_peer_request { |
322 | struct drbd_work w; |
323 | struct drbd_peer_device *peer_device; |
324 | struct drbd_epoch *epoch; /* for writes */ |
325 | struct page *pages; |
326 | blk_opf_t opf; |
327 | atomic_t pending_bios; |
328 | struct drbd_interval i; |
329 | /* see comments on ee flag bits below */ |
330 | unsigned long flags; |
331 | unsigned long submit_jif; |
332 | union { |
333 | u64 block_id; |
334 | struct digest_info *digest; |
335 | }; |
336 | }; |
337 | |
338 | /* Equivalent to bio_op and req_op. */ |
339 | #define peer_req_op(peer_req) \ |
340 | ((peer_req)->opf & REQ_OP_MASK) |
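/* E.g. peer_req_op(peer_req) == REQ_OP_WRITE tests whether a peer
 * request carries a write, just as req_op() does for a struct request. */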
341 | |
342 | /* ee flag bits. |
343 | * While corresponding bios are in flight, the only modification will be |
344 | * set_bit WAS_ERROR, which has to be atomic. |
345 | * If no bios are in flight yet, or all have been completed, |
346 | * non-atomic modification to ee->flags is ok. |
347 | */ |
348 | enum { |
349 | __EE_CALL_AL_COMPLETE_IO, |
350 | __EE_MAY_SET_IN_SYNC, |
351 | |
352 | /* is this a TRIM aka REQ_OP_DISCARD? */ |
353 | __EE_TRIM, |
354 | /* explicit zero-out requested, or |
355 | * our lower level cannot handle trim, |
356 | * and we want to fall back to zeroout instead */ |
357 | __EE_ZEROOUT, |
358 | |
359 | /* In case a barrier failed, |
360 | * we need to resubmit without the barrier flag. */ |
361 | __EE_RESUBMITTED, |
362 | |
363 | /* we may have several bios per peer request. |
364 | * if any of those fail, we set this flag atomically |
365 | * from the endio callback */ |
366 | __EE_WAS_ERROR, |
367 | |
368 | /* This ee has a pointer to a digest instead of a block id */ |
369 | __EE_HAS_DIGEST, |
370 | |
371 | /* Conflicting local requests need to be restarted after this request */ |
372 | __EE_RESTART_REQUESTS, |
373 | |
374 | /* The peer wants a write ACK for this (wire proto C) */ |
375 | __EE_SEND_WRITE_ACK, |
376 | |
377 | /* Is set when net_conf had two_primaries set while creating this peer_req */ |
378 | __EE_IN_INTERVAL_TREE, |
379 | |
380 | /* for debugfs: */ |
381 | /* has this been submitted, or does it still wait for something else? */ |
382 | __EE_SUBMITTED, |
383 | |
384 | /* this is/was a write request */ |
385 | __EE_WRITE, |
386 | |
387 | /* this is/was a write same request */ |
388 | __EE_WRITE_SAME, |
389 | |
390 | /* this originates from application on peer |
391 | * (not some resync or verify or other DRBD internal request) */ |
392 | __EE_APPLICATION, |
393 | |
394 | /* If it contains only 0 bytes, send back P_RS_DEALLOCATED */ |
395 | __EE_RS_THIN_REQ, |
396 | }; |
397 | #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) |
398 | #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) |
399 | #define EE_TRIM (1<<__EE_TRIM) |
400 | #define EE_ZEROOUT (1<<__EE_ZEROOUT) |
401 | #define EE_RESUBMITTED (1<<__EE_RESUBMITTED) |
402 | #define EE_WAS_ERROR (1<<__EE_WAS_ERROR) |
403 | #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) |
404 | #define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS) |
405 | #define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK) |
406 | #define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE) |
407 | #define EE_SUBMITTED (1<<__EE_SUBMITTED) |
408 | #define EE_WRITE (1<<__EE_WRITE) |
409 | #define EE_WRITE_SAME (1<<__EE_WRITE_SAME) |
410 | #define EE_APPLICATION (1<<__EE_APPLICATION) |
411 | #define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ) |
412 | |
413 | /* flag bits per device */ |
414 | enum { |
415 | UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */ |
416 | MD_DIRTY, /* current uuids and flags not yet on disk */ |
417 | USE_DEGR_WFC_T, /* degr-wfc-timeout instead of wfc-timeout. */ |
418 | CL_ST_CHG_SUCCESS, |
419 | CL_ST_CHG_FAIL, |
420 | CRASHED_PRIMARY, /* This node was a crashed primary. |
421 | * Gets cleared when the state.conn |
422 | * goes into C_CONNECTED state. */ |
423 | CONSIDER_RESYNC, |
424 | |
	MD_NO_FUA,		/* The user wants us to not use FUA/FLUSH on the meta data device */
426 | |
427 | BITMAP_IO, /* suspend application io; |
428 | once no more io in flight, start bitmap io */ |
429 | BITMAP_IO_QUEUED, /* Started bitmap IO */ |
430 | WAS_IO_ERROR, /* Local disk failed, returned IO error */ |
431 | WAS_READ_ERROR, /* Local disk READ failed (set additionally to the above) */ |
432 | FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */ |
433 | RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ |
434 | RESIZE_PENDING, /* Size change detected locally, waiting for the response from |
435 | * the peer, if it changed there as well. */ |
436 | NEW_CUR_UUID, /* Create new current UUID when thawing IO */ |
437 | AL_SUSPENDED, /* Activity logging is currently suspended. */ |
438 | AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ |
439 | B_RS_H_DONE, /* Before resync handler done (already executed) */ |
440 | DISCARD_MY_DATA, /* discard_my_data flag per volume */ |
441 | READ_BALANCE_RR, |
442 | |
443 | FLUSH_PENDING, /* if set, device->flush_jif is when we submitted that flush |
444 | * from drbd_flush_after_epoch() */ |
445 | |
446 | /* cleared only after backing device related structures have been destroyed. */ |
447 | GOING_DISKLESS, /* Disk is being detached, because of io-error, or admin request. */ |
448 | |
449 | /* to be used in drbd_device_post_work() */ |
450 | GO_DISKLESS, /* tell worker to schedule cleanup before detach */ |
451 | DESTROY_DISK, /* tell worker to close backing devices and destroy related structures. */ |
452 | MD_SYNC, /* tell worker to call drbd_md_sync() */ |
453 | RS_START, /* tell worker to start resync/OV */ |
454 | RS_PROGRESS, /* tell worker that resync made significant progress */ |
455 | RS_DONE, /* tell worker that resync is done */ |
456 | }; |
457 | |
458 | struct drbd_bitmap; /* opaque for drbd_device */ |
459 | |
460 | /* definition of bits in bm_flags to be used in drbd_bm_lock |
461 | * and drbd_bitmap_io and friends. */ |
462 | enum bm_flag { |
463 | /* currently locked for bulk operation */ |
464 | BM_LOCKED_MASK = 0xf, |
465 | |
466 | /* in detail, that is: */ |
467 | BM_DONT_CLEAR = 0x1, |
468 | BM_DONT_SET = 0x2, |
469 | BM_DONT_TEST = 0x4, |
470 | |
471 | /* so we can mark it locked for bulk operation, |
472 | * and still allow all non-bulk operations */ |
473 | BM_IS_LOCKED = 0x8, |
474 | |
475 | /* (test bit, count bit) allowed (common case) */ |
476 | BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED, |
477 | |
478 | /* testing bits, as well as setting new bits allowed, but clearing bits |
479 | * would be unexpected. Used during bitmap receive. Setting new bits |
480 | * requires sending of "out-of-sync" information, though. */ |
481 | BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED, |
482 | |
483 | /* for drbd_bm_write_copy_pages, everything is allowed, |
484 | * only concurrent bulk operations are locked out. */ |
485 | BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED, |
486 | }; |
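/* Typical use (a sketch; the "why" string is free-form): bulk-lock the
 * bitmap while receiving it from the peer, where setting bits is fine
 * but clearing would be unexpected:
 *
 *	drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
 *	(receive and merge bitmap packets)
 *	drbd_bm_unlock(device);
 */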
487 | |
488 | struct drbd_work_queue { |
489 | struct list_head q; |
490 | spinlock_t q_lock; /* to protect the list. */ |
491 | wait_queue_head_t q_wait; |
492 | }; |
493 | |
494 | struct drbd_socket { |
495 | struct mutex mutex; |
496 | struct socket *socket; |
497 | /* this way we get our |
498 | * send/receive buffers off the stack */ |
499 | void *sbuf; |
500 | void *rbuf; |
501 | }; |
502 | |
503 | struct drbd_md { |
504 | u64 md_offset; /* sector offset to 'super' block */ |
505 | |
506 | u64 la_size_sect; /* last agreed size, unit sectors */ |
507 | spinlock_t uuid_lock; |
508 | u64 uuid[UI_SIZE]; |
509 | u64 device_uuid; |
510 | u32 flags; |
511 | u32 md_size_sect; |
512 | |
513 | s32 al_offset; /* signed relative sector offset to activity log */ |
514 | s32 bm_offset; /* signed relative sector offset to bitmap */ |
515 | |
516 | /* cached value of bdev->disk_conf->meta_dev_idx (see below) */ |
517 | s32 meta_dev_idx; |
518 | |
519 | /* see al_tr_number_to_on_disk_sector() */ |
520 | u32 al_stripes; |
521 | u32 al_stripe_size_4k; |
522 | u32 al_size_4k; /* cached product of the above */ |
523 | }; |
524 | |
525 | struct drbd_backing_dev { |
526 | struct block_device *backing_bdev; |
527 | struct bdev_handle *backing_bdev_handle; |
528 | struct block_device *md_bdev; |
529 | struct bdev_handle *md_bdev_handle; |
530 | struct drbd_md md; |
531 | struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */ |
532 | sector_t known_size; /* last known size of that backing device */ |
533 | }; |
534 | |
535 | struct drbd_md_io { |
536 | struct page *page; |
537 | unsigned long start_jif; /* last call to drbd_md_get_buffer */ |
538 | unsigned long submit_jif; /* last _drbd_md_sync_page_io() submit */ |
539 | const char *current_use; |
540 | atomic_t in_use; |
541 | unsigned int done; |
542 | int error; |
543 | }; |
544 | |
545 | struct bm_io_work { |
546 | struct drbd_work w; |
547 | struct drbd_peer_device *peer_device; |
548 | char *why; |
549 | enum bm_flag flags; |
550 | int (*io_fn)(struct drbd_device *device, struct drbd_peer_device *peer_device); |
551 | void (*done)(struct drbd_device *device, int rv); |
552 | }; |
553 | |
554 | struct fifo_buffer { |
555 | unsigned int head_index; |
556 | unsigned int size; |
557 | int total; /* sum of all values */ |
558 | int values[] __counted_by(size); |
559 | }; |
560 | extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size); |
561 | |
562 | /* flag bits per connection */ |
563 | enum { |
564 | NET_CONGESTED, /* The data socket is congested */ |
565 | RESOLVE_CONFLICTS, /* Set on one node, cleared on the peer! */ |
566 | SEND_PING, |
567 | GOT_PING_ACK, /* set when we receive a ping_ack packet, ping_wait gets woken */ |
568 | CONN_WD_ST_CHG_REQ, /* A cluster wide state change on the connection is active */ |
569 | CONN_WD_ST_CHG_OKAY, |
570 | CONN_WD_ST_CHG_FAIL, |
571 | CONN_DRY_RUN, /* Expect disconnect after resync handshake. */ |
572 | CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */ |
573 | STATE_SENT, /* Do not change state/UUIDs while this is set */ |
574 | CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC) |
575 | * pending, from drbd worker context. |
576 | */ |
577 | DISCONNECT_SENT, |
578 | |
579 | DEVICE_WORK_PENDING, /* tell worker that some device has pending work */ |
580 | }; |
581 | |
582 | enum which_state { NOW, OLD = NOW, NEW }; |
583 | |
584 | struct drbd_resource { |
585 | char *name; |
586 | #ifdef CONFIG_DEBUG_FS |
587 | struct dentry *debugfs_res; |
588 | struct dentry *debugfs_res_volumes; |
589 | struct dentry *debugfs_res_connections; |
590 | struct dentry *debugfs_res_in_flight_summary; |
591 | #endif |
592 | struct kref kref; |
593 | struct idr devices; /* volume number to device mapping */ |
594 | struct list_head connections; |
595 | struct list_head resources; |
596 | struct res_opts res_opts; |
	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
598 | struct mutex adm_mutex; /* mutex to serialize administrative requests */ |
599 | spinlock_t req_lock; |
600 | |
601 | unsigned susp:1; /* IO suspended by user */ |
602 | unsigned susp_nod:1; /* IO suspended because no data */ |
603 | unsigned susp_fen:1; /* IO suspended because fence peer handler runs */ |
604 | |
605 | enum write_ordering_e write_ordering; |
606 | |
607 | cpumask_var_t cpu_mask; |
608 | }; |
609 | |
610 | struct drbd_thread_timing_details |
611 | { |
612 | unsigned long start_jif; |
613 | void *cb_addr; |
614 | const char *caller_fn; |
615 | unsigned int line; |
616 | unsigned int cb_nr; |
617 | }; |
618 | |
619 | struct drbd_connection { |
620 | struct list_head connections; |
621 | struct drbd_resource *resource; |
622 | #ifdef CONFIG_DEBUG_FS |
623 | struct dentry *debugfs_conn; |
624 | struct dentry *debugfs_conn_callback_history; |
625 | struct dentry *debugfs_conn_oldest_requests; |
626 | #endif |
627 | struct kref kref; |
628 | struct idr peer_devices; /* volume number to peer device mapping */ |
629 | enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */ |
630 | struct mutex cstate_mutex; /* Protects graceful disconnects */ |
631 | unsigned int connect_cnt; /* Inc each time a connection is established */ |
632 | |
633 | unsigned long flags; |
634 | struct net_conf *net_conf; /* content protected by rcu */ |
635 | wait_queue_head_t ping_wait; /* Woken upon reception of a ping, and a state change */ |
636 | |
637 | struct sockaddr_storage my_addr; |
638 | int my_addr_len; |
639 | struct sockaddr_storage peer_addr; |
640 | int peer_addr_len; |
641 | |
642 | struct drbd_socket data; /* data/barrier/cstate/parameter packets */ |
643 | struct drbd_socket meta; /* ping/ack (metadata) packets */ |
644 | int agreed_pro_version; /* actually used protocol version */ |
645 | u32 agreed_features; |
646 | unsigned long last_received; /* in jiffies, either socket */ |
647 | unsigned int ko_count; |
648 | |
649 | struct list_head transfer_log; /* all requests not yet fully processed */ |
650 | |
651 | struct crypto_shash *cram_hmac_tfm; |
652 | struct crypto_shash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */ |
653 | struct crypto_shash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */ |
654 | struct crypto_shash *csums_tfm; |
655 | struct crypto_shash *verify_tfm; |
656 | void *int_dig_in; |
657 | void *int_dig_vv; |
658 | |
659 | /* receiver side */ |
660 | struct drbd_epoch *current_epoch; |
661 | spinlock_t epoch_lock; |
662 | unsigned int epochs; |
663 | atomic_t current_tle_nr; /* transfer log epoch number */ |
664 | unsigned current_tle_writes; /* writes seen within this tl epoch */ |
665 | |
666 | unsigned long last_reconnect_jif; |
667 | /* empty member on older kernels without blk_start_plug() */ |
668 | struct blk_plug receiver_plug; |
669 | struct drbd_thread receiver; |
670 | struct drbd_thread worker; |
671 | struct drbd_thread ack_receiver; |
672 | struct workqueue_struct *ack_sender; |
673 | |
674 | /* cached pointers, |
675 | * so we can look up the oldest pending requests more quickly. |
676 | * protected by resource->req_lock */ |
677 | struct drbd_request *req_next; /* DRBD 9: todo.req_next */ |
678 | struct drbd_request *req_ack_pending; |
679 | struct drbd_request *req_not_net_done; |
680 | |
681 | /* sender side */ |
682 | struct drbd_work_queue sender_work; |
683 | |
684 | #define DRBD_THREAD_DETAILS_HIST 16 |
685 | unsigned int w_cb_nr; /* keeps counting up */ |
686 | unsigned int r_cb_nr; /* keeps counting up */ |
687 | struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST]; |
688 | struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST]; |
689 | |
690 | struct { |
691 | unsigned long last_sent_barrier_jif; |
692 | |
693 | /* whether this sender thread |
694 | * has processed a single write yet. */ |
695 | bool seen_any_write_yet; |
696 | |
697 | /* Which barrier number to send with the next P_BARRIER */ |
698 | int current_epoch_nr; |
699 | |
700 | /* how many write requests have been sent |
701 | * with req->epoch == current_epoch_nr. |
702 | * If none, no P_BARRIER will be sent. */ |
703 | unsigned current_epoch_writes; |
704 | } send; |
705 | }; |
706 | |
707 | static inline bool has_net_conf(struct drbd_connection *connection) |
708 | { |
709 | bool has_net_conf; |
710 | |
711 | rcu_read_lock(); |
712 | has_net_conf = rcu_dereference(connection->net_conf); |
713 | rcu_read_unlock(); |
714 | |
715 | return has_net_conf; |
716 | } |
717 | |
718 | void __update_timing_details( |
719 | struct drbd_thread_timing_details *tdp, |
720 | unsigned int *cb_nr, |
721 | void *cb, |
722 | const char *fn, const unsigned int line); |
723 | |
724 | #define update_worker_timing_details(c, cb) \ |
725 | __update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ ) |
726 | #define update_receiver_timing_details(c, cb) \ |
727 | __update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ ) |
728 | |
729 | struct submit_worker { |
730 | struct workqueue_struct *wq; |
731 | struct work_struct worker; |
732 | |
733 | /* protected by ..->resource->req_lock */ |
734 | struct list_head writes; |
735 | }; |
736 | |
737 | struct drbd_peer_device { |
738 | struct list_head peer_devices; |
739 | struct drbd_device *device; |
740 | struct drbd_connection *connection; |
741 | struct work_struct send_acks_work; |
742 | #ifdef CONFIG_DEBUG_FS |
743 | struct dentry *debugfs_peer_dev; |
744 | #endif |
745 | }; |
746 | |
747 | struct drbd_device { |
748 | struct drbd_resource *resource; |
749 | struct list_head peer_devices; |
750 | struct list_head pending_bitmap_io; |
751 | |
752 | unsigned long flush_jif; |
753 | #ifdef CONFIG_DEBUG_FS |
754 | struct dentry *debugfs_minor; |
755 | struct dentry *debugfs_vol; |
756 | struct dentry *debugfs_vol_oldest_requests; |
757 | struct dentry *debugfs_vol_act_log_extents; |
758 | struct dentry *debugfs_vol_resync_extents; |
759 | struct dentry *debugfs_vol_data_gen_id; |
760 | struct dentry *debugfs_vol_ed_gen_id; |
761 | #endif |
762 | |
763 | unsigned int vnr; /* volume number within the connection */ |
764 | unsigned int minor; /* device minor number */ |
765 | |
766 | struct kref kref; |
767 | |
768 | /* things that are stored as / read from meta data on disk */ |
769 | unsigned long flags; |
770 | |
771 | /* configured by drbdsetup */ |
772 | struct drbd_backing_dev *ldev; |
773 | |
774 | sector_t p_size; /* partner's disk size */ |
775 | struct request_queue *rq_queue; |
776 | struct gendisk *vdisk; |
777 | |
778 | unsigned long last_reattach_jif; |
779 | struct drbd_work resync_work; |
780 | struct drbd_work unplug_work; |
781 | struct timer_list resync_timer; |
782 | struct timer_list md_sync_timer; |
783 | struct timer_list start_resync_timer; |
784 | struct timer_list request_timer; |
785 | |
786 | /* Used after attach while negotiating new disk state. */ |
787 | union drbd_state new_state_tmp; |
788 | |
789 | union drbd_dev_state state; |
790 | wait_queue_head_t misc_wait; |
791 | wait_queue_head_t state_wait; /* upon each state change. */ |
792 | unsigned int send_cnt; |
793 | unsigned int recv_cnt; |
794 | unsigned int read_cnt; |
795 | unsigned int writ_cnt; |
796 | unsigned int al_writ_cnt; |
797 | unsigned int bm_writ_cnt; |
798 | atomic_t ap_bio_cnt; /* Requests we need to complete */ |
799 | atomic_t ap_actlog_cnt; /* Requests waiting for activity log */ |
800 | atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */ |
801 | atomic_t rs_pending_cnt; /* RS request/data packets on the wire */ |
802 | atomic_t unacked_cnt; /* Need to send replies for */ |
803 | atomic_t local_cnt; /* Waiting for local completion */ |
804 | atomic_t suspend_cnt; |
805 | |
806 | /* Interval tree of pending local requests */ |
807 | struct rb_root read_requests; |
808 | struct rb_root write_requests; |
809 | |
810 | /* for statistics and timeouts */ |
811 | /* [0] read, [1] write */ |
812 | struct list_head pending_master_completion[2]; |
813 | struct list_head pending_completion[2]; |
814 | |
815 | /* use checksums for *this* resync */ |
816 | bool use_csums; |
817 | /* blocks to resync in this run [unit BM_BLOCK_SIZE] */ |
818 | unsigned long rs_total; |
819 | /* number of resync blocks that failed in this run */ |
820 | unsigned long rs_failed; |
821 | /* Syncer's start time [unit jiffies] */ |
822 | unsigned long rs_start; |
823 | /* cumulated time in PausedSyncX state [unit jiffies] */ |
824 | unsigned long rs_paused; |
825 | /* skipped because csum was equal [unit BM_BLOCK_SIZE] */ |
826 | unsigned long rs_same_csum; |
827 | #define DRBD_SYNC_MARKS 8 |
828 | #define DRBD_SYNC_MARK_STEP (3*HZ) |
829 | /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */ |
830 | unsigned long rs_mark_left[DRBD_SYNC_MARKS]; |
831 | /* marks's time [unit jiffies] */ |
832 | unsigned long rs_mark_time[DRBD_SYNC_MARKS]; |
833 | /* current index into rs_mark_{left,time} */ |
834 | int rs_last_mark; |
835 | unsigned long rs_last_bcast; /* [unit jiffies] */ |
836 | |
837 | /* where does the admin want us to start? (sector) */ |
838 | sector_t ov_start_sector; |
839 | sector_t ov_stop_sector; |
840 | /* where are we now? (sector) */ |
841 | sector_t ov_position; |
842 | /* Start sector of out of sync range (to merge printk reporting). */ |
843 | sector_t ov_last_oos_start; |
844 | /* size of out-of-sync range in sectors. */ |
845 | sector_t ov_last_oos_size; |
846 | unsigned long ov_left; /* in bits */ |
847 | |
848 | struct drbd_bitmap *bitmap; |
849 | unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */ |
850 | |
851 | /* Used to track operations of resync... */ |
852 | struct lru_cache *resync; |
853 | /* Number of locked elements in resync LRU */ |
854 | unsigned int resync_locked; |
855 | /* resync extent number waiting for application requests */ |
856 | unsigned int resync_wenr; |
857 | |
858 | int open_cnt; |
859 | u64 *p_uuid; |
860 | |
861 | struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */ |
862 | struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */ |
863 | struct list_head done_ee; /* need to send P_WRITE_ACK */ |
864 | struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */ |
865 | struct list_head net_ee; /* zero-copy network send in progress */ |
866 | |
867 | int next_barrier_nr; |
868 | struct list_head resync_reads; |
869 | atomic_t pp_in_use; /* allocated from page pool */ |
870 | atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */ |
871 | wait_queue_head_t ee_wait; |
872 | struct drbd_md_io md_io; |
873 | spinlock_t al_lock; |
874 | wait_queue_head_t al_wait; |
875 | struct lru_cache *act_log; /* activity log */ |
876 | unsigned int al_tr_number; |
877 | int al_tr_cycle; |
878 | wait_queue_head_t seq_wait; |
879 | atomic_t packet_seq; |
880 | unsigned int peer_seq; |
881 | spinlock_t peer_seq_lock; |
882 | unsigned long comm_bm_set; /* communicated number of set bits. */ |
883 | struct bm_io_work bm_io_work; |
884 | u64 ed_uuid; /* UUID of the exposed data */ |
885 | struct mutex own_state_mutex; |
886 | struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */ |
	char congestion_reason;	/* Why we were congested... */
888 | atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */ |
889 | atomic_t rs_sect_ev; /* for submitted resync data rate, both */ |
890 | int rs_last_sect_ev; /* counter to compare with */ |
891 | int rs_last_events; /* counter of read or write "events" (unit sectors) |
892 | * on the lower level device when we last looked. */ |
893 | int c_sync_rate; /* current resync rate after syncer throttle magic */ |
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, resource->conf_update) */
895 | int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ |
896 | atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ |
897 | unsigned int peer_max_bio_size; |
898 | unsigned int local_max_bio_size; |
899 | |
900 | /* any requests that would block in drbd_make_request() |
901 | * are deferred to this single-threaded work queue */ |
902 | struct submit_worker submit; |
903 | }; |
904 | |
905 | struct drbd_bm_aio_ctx { |
906 | struct drbd_device *device; |
	struct list_head list; /* on device->pending_bitmap_io */
908 | unsigned long start_jif; |
909 | atomic_t in_flight; |
910 | unsigned int done; |
911 | unsigned flags; |
912 | #define BM_AIO_COPY_PAGES 1 |
913 | #define BM_AIO_WRITE_HINTED 2 |
914 | #define BM_AIO_WRITE_ALL_PAGES 4 |
915 | #define BM_AIO_READ 8 |
916 | int error; |
917 | struct kref kref; |
918 | }; |
919 | |
920 | struct drbd_config_context { |
921 | /* assigned from drbd_genlmsghdr */ |
922 | unsigned int minor; |
923 | /* assigned from request attributes, if present */ |
924 | unsigned int volume; |
925 | #define VOLUME_UNSPECIFIED (-1U) |
926 | /* pointer into the request skb, |
927 | * limited lifetime! */ |
928 | char *resource_name; |
929 | struct nlattr *my_addr; |
930 | struct nlattr *peer_addr; |
931 | |
932 | /* reply buffer */ |
933 | struct sk_buff *reply_skb; |
934 | /* pointer into reply buffer */ |
935 | struct drbd_genlmsghdr *reply_dh; |
936 | /* resolved from attributes, if possible */ |
937 | struct drbd_device *device; |
938 | struct drbd_resource *resource; |
939 | struct drbd_connection *connection; |
940 | }; |
941 | |
942 | static inline struct drbd_device *minor_to_device(unsigned int minor) |
943 | { |
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
945 | } |
946 | |
947 | static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device) |
948 | { |
949 | return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices); |
950 | } |
951 | |
952 | static inline struct drbd_peer_device * |
953 | conn_peer_device(struct drbd_connection *connection, int volume_number) |
954 | { |
	return idr_find(&connection->peer_devices, volume_number);
956 | } |
957 | |
958 | #define for_each_resource(resource, _resources) \ |
959 | list_for_each_entry(resource, _resources, resources) |
960 | |
961 | #define for_each_resource_rcu(resource, _resources) \ |
962 | list_for_each_entry_rcu(resource, _resources, resources) |
963 | |
964 | #define for_each_resource_safe(resource, tmp, _resources) \ |
965 | list_for_each_entry_safe(resource, tmp, _resources, resources) |
966 | |
967 | #define for_each_connection(connection, resource) \ |
968 | list_for_each_entry(connection, &resource->connections, connections) |
969 | |
970 | #define for_each_connection_rcu(connection, resource) \ |
971 | list_for_each_entry_rcu(connection, &resource->connections, connections) |
972 | |
973 | #define for_each_connection_safe(connection, tmp, resource) \ |
974 | list_for_each_entry_safe(connection, tmp, &resource->connections, connections) |
975 | |
976 | #define for_each_peer_device(peer_device, device) \ |
977 | list_for_each_entry(peer_device, &device->peer_devices, peer_devices) |
978 | |
979 | #define for_each_peer_device_rcu(peer_device, device) \ |
980 | list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices) |
981 | |
982 | #define for_each_peer_device_safe(peer_device, tmp, device) \ |
983 | list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices) |
984 | |
985 | static inline unsigned int device_to_minor(struct drbd_device *device) |
986 | { |
987 | return device->minor; |
988 | } |
989 | |
990 | /* |
991 | * function declarations |
992 | *************************/ |
993 | |
994 | /* drbd_main.c */ |
995 | |
996 | enum dds_flags { |
997 | DDSF_FORCED = 1, |
998 | DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */ |
999 | }; |
1000 | |
1001 | extern void drbd_init_set_defaults(struct drbd_device *device); |
1002 | extern int drbd_thread_start(struct drbd_thread *thi); |
1003 | extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait); |
1004 | #ifdef CONFIG_SMP |
1005 | extern void drbd_thread_current_set_cpu(struct drbd_thread *thi); |
1006 | #else |
1007 | #define drbd_thread_current_set_cpu(A) ({}) |
1008 | #endif |
1009 | extern void tl_release(struct drbd_connection *, unsigned int barrier_nr, |
1010 | unsigned int set_size); |
1011 | extern void tl_clear(struct drbd_connection *); |
1012 | extern void drbd_free_sock(struct drbd_connection *connection); |
1013 | extern int drbd_send(struct drbd_connection *connection, struct socket *sock, |
1014 | void *buf, size_t size, unsigned msg_flags); |
1015 | extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t, |
1016 | unsigned); |
1017 | |
1018 | extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd); |
1019 | extern int drbd_send_protocol(struct drbd_connection *connection); |
1020 | extern int drbd_send_uuids(struct drbd_peer_device *); |
1021 | extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *); |
1022 | extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *); |
1023 | extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags); |
1024 | extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s); |
1025 | extern int drbd_send_current_state(struct drbd_peer_device *); |
1026 | extern int drbd_send_sync_param(struct drbd_peer_device *); |
1027 | extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, |
1028 | u32 set_size); |
1029 | extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet, |
1030 | struct drbd_peer_request *); |
1031 | extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet, |
1032 | struct p_block_req *rp); |
1033 | extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet, |
1034 | struct p_data *dp, int data_size); |
1035 | extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet, |
1036 | sector_t sector, int blksize, u64 block_id); |
1037 | extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *); |
1038 | extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet, |
1039 | struct drbd_peer_request *); |
1040 | extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req); |
1041 | extern int drbd_send_drequest(struct drbd_peer_device *, int cmd, |
1042 | sector_t sector, int size, u64 block_id); |
1043 | extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector, |
1044 | int size, void *digest, int digest_size, |
1045 | enum drbd_packet cmd); |
1046 | extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size); |
1047 | |
1048 | extern int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device); |
1049 | extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode); |
1050 | extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode); |
1051 | extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *); |
1052 | extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev); |
1053 | extern void drbd_device_cleanup(struct drbd_device *device); |
1054 | extern void drbd_print_uuids(struct drbd_device *device, const char *text); |
1055 | extern void drbd_queue_unplug(struct drbd_device *device); |
1056 | |
1057 | extern void conn_md_sync(struct drbd_connection *connection); |
1058 | extern void drbd_md_write(struct drbd_device *device, void *buffer); |
1059 | extern void drbd_md_sync(struct drbd_device *device); |
1060 | extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev); |
1061 | extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local); |
1062 | extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local); |
1063 | extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local); |
1064 | extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local); |
1065 | extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local); |
1066 | extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local); |
1067 | extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local); |
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
1069 | extern int drbd_md_test_flag(struct drbd_backing_dev *, int); |
1070 | extern void drbd_md_mark_dirty(struct drbd_device *device); |
1071 | extern void drbd_queue_bitmap_io(struct drbd_device *device, |
1072 | int (*io_fn)(struct drbd_device *, struct drbd_peer_device *), |
1073 | void (*done)(struct drbd_device *, int), |
1074 | char *why, enum bm_flag flags, |
1075 | struct drbd_peer_device *peer_device); |
1076 | extern int drbd_bitmap_io(struct drbd_device *device, |
1077 | int (*io_fn)(struct drbd_device *, struct drbd_peer_device *), |
1078 | char *why, enum bm_flag flags, |
1079 | struct drbd_peer_device *peer_device); |
1080 | extern int drbd_bitmap_io_from_worker(struct drbd_device *device, |
1081 | int (*io_fn)(struct drbd_device *, struct drbd_peer_device *), |
1082 | char *why, enum bm_flag flags, |
1083 | struct drbd_peer_device *peer_device); |
1084 | extern int drbd_bmio_set_n_write(struct drbd_device *device, |
1085 | struct drbd_peer_device *peer_device) __must_hold(local); |
1086 | extern int drbd_bmio_clear_n_write(struct drbd_device *device, |
1087 | struct drbd_peer_device *peer_device) __must_hold(local); |
1088 | |
1089 | /* Meta data layout |
1090 | * |
1091 | * We currently have two possible layouts. |
1092 | * Offsets in (512 byte) sectors. |
1093 | * external: |
1094 | * |----------- md_size_sect ------------------| |
1095 | * [ 4k superblock ][ activity log ][ Bitmap ] |
1096 | * | al_offset == 8 | |
1097 | * | bm_offset = al_offset + X | |
1098 | * ==> bitmap sectors = md_size_sect - bm_offset |
1099 | * |
1100 | * Variants: |
1101 | * old, indexed fixed size meta data: |
1102 | * |
1103 | * internal: |
1104 | * |----------- md_size_sect ------------------| |
1105 | * [data.....][ Bitmap ][ activity log ][ 4k superblock ][padding*] |
1106 | * | al_offset < 0 | |
1107 | * | bm_offset = al_offset - Y | |
1108 | * ==> bitmap sectors = Y = al_offset - bm_offset |
1109 | * |
1110 | * [padding*] are zero or up to 7 unused 512 Byte sectors to the |
1111 | * end of the device, so that the [4k superblock] will be 4k aligned. |
1112 | * |
1113 | * The activity log consists of 4k transaction blocks, |
 * which are written in a ring-buffer, or striped ring-buffer like fashion.
 * The activity log size used to be a fixed 32kB,
 * but is about to become configurable.
1117 | */ |
1118 | |
1119 | /* Our old fixed size meta data layout |
1120 | * allows up to about 3.8TB, so if you want more, |
1121 | * you need to use the "flexible" meta data format. */ |
1122 | #define MD_128MB_SECT (128LLU << 11) /* 128 MB, unit sectors */ |
1123 | #define MD_4kB_SECT 8 |
1124 | #define MD_32kB_SECT 64 |
1125 | |
1126 | /* One activity log extent represents 4M of storage */ |
1127 | #define AL_EXTENT_SHIFT 22 |
1128 | #define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT) |
1129 | |
1130 | /* We could make these currently hardcoded constants configurable |
1131 | * variables at create-md time (or even re-configurable at runtime?). |
1132 | * Which will require some more changes to the DRBD "super block" |
1133 | * and attach code. |
1134 | * |
1135 | * updates per transaction: |
1136 | * This many changes to the active set can be logged with one transaction. |
1137 | * This number is arbitrary. |
1138 | * context per transaction: |
1139 | * This many context extent numbers are logged with each transaction. |
1140 | * This number is resulting from the transaction block size (4k), the layout |
1141 | * of the transaction header, and the number of updates per transaction. |
1142 | * See drbd_actlog.c:struct al_transaction_on_disk |
1143 | * */ |
1144 | #define AL_UPDATES_PER_TRANSACTION 64 // arbitrary |
1145 | #define AL_CONTEXT_PER_TRANSACTION 919 // (4096 - 36 - 6*64)/4 |
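/* Spelled out (an interpretation; see struct al_transaction_on_disk in
 * drbd_actlog.c for the authoritative layout): a 4k transaction block
 * minus a 36 byte header minus 64 updates of 6 bytes each leaves
 * 3676 bytes, i.e. room for 919 4-byte context extent numbers. */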
1146 | |
1147 | #if BITS_PER_LONG == 32 |
1148 | #define LN2_BPL 5 |
1149 | #define cpu_to_lel(A) cpu_to_le32(A) |
1150 | #define lel_to_cpu(A) le32_to_cpu(A) |
1151 | #elif BITS_PER_LONG == 64 |
1152 | #define LN2_BPL 6 |
1153 | #define cpu_to_lel(A) cpu_to_le64(A) |
1154 | #define lel_to_cpu(A) le64_to_cpu(A) |
1155 | #else |
1156 | #error "LN2 of BITS_PER_LONG unknown!" |
1157 | #endif |
1158 | |
1159 | /* resync bitmap */ |
1160 | /* 16MB sized 'bitmap extent' to track syncer usage */ |
1161 | struct bm_extent { |
1162 | int rs_left; /* number of bits set (out of sync) in this extent. */ |
1163 | int rs_failed; /* number of failed resync requests in this extent. */ |
1164 | unsigned long flags; |
1165 | struct lc_element lce; |
1166 | }; |
1167 | |
1168 | #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */ |
1169 | #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */ |
1170 | #define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */ |
1171 | |
1172 | /* drbd_bitmap.c */ |
1173 | /* |
1174 | * We need to store one bit for a block. |
1175 | * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap. |
1176 | * Bit 0 ==> local node thinks this block is binary identical on both nodes |
1177 | * Bit 1 ==> local node thinks this block needs to be synced. |
1178 | */ |
1179 | |
1180 | #define SLEEP_TIME (HZ/10) |
1181 | |
1182 | /* We do bitmap IO in units of 4k blocks. |
1183 | * We also still have a hardcoded 4k per bit relation. */ |
1184 | #define BM_BLOCK_SHIFT 12 /* 4k per bit */ |
1185 | #define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT) |
1186 | /* mostly arbitrarily set the represented size of one bitmap extent, |
1187 | * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap |
1188 | * at 4k per bit resolution) */ |
1189 | #define BM_EXT_SHIFT 24 /* 16 MiB per resync extent */ |
1190 | #define BM_EXT_SIZE (1<<BM_EXT_SHIFT) |
1191 | |
1192 | #if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12) |
1193 | #error "HAVE YOU FIXED drbdmeta AS WELL??" |
1194 | #endif |
1195 | |
1196 | /* thus many _storage_ sectors are described by one bit */ |
1197 | #define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9)) |
1198 | #define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9)) |
1199 | #define BM_SECT_PER_BIT BM_BIT_TO_SECT(1) |
1200 | |
1201 | /* bit to represented kilo byte conversion */ |
1202 | #define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10)) |
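/* Worked example with BM_BLOCK_SHIFT == 12: one bit spans
 * BM_SECT_PER_BIT == 8 sectors of 512 bytes (4 KiB), so
 * BM_SECT_TO_BIT(16) == 2, BM_BIT_TO_SECT(2) == 16, and Bit2KB(1) == 4. */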
1203 | |
1204 | /* in which _bitmap_ extent (resp. sector) the bit for a certain |
1205 | * _storage_ sector is located in */ |
1206 | #define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9)) |
1207 | #define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT)) |
1208 | |
1209 | /* first storage sector a bitmap extent corresponds to */ |
1210 | #define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9)) |
1211 | /* how much _storage_ sectors we have per bitmap extent */ |
1212 | #define BM_SECT_PER_EXT BM_EXT_TO_SECT(1) |
1213 | /* how many bits are covered by one bitmap extent (resync extent) */ |
1214 | #define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT)) |
1215 | |
1216 | #define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1) |
1217 | |
1218 | |
1219 | /* in one sector of the bitmap, we have this many activity_log extents. */ |
1220 | #define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT)) |
1221 | |
1222 | /* the extent in "PER_EXTENT" below is an activity log extent |
1223 | * we need that many (long words/bytes) to store the bitmap |
1224 | * of one AL_EXTENT_SIZE chunk of storage. |
1225 | * we can store the bitmap for that many AL_EXTENTS within |
1226 | * one sector of the _on_disk_ bitmap: |
1227 | * bit 0 bit 37 bit 38 bit (512*8)-1 |
1228 | * ...|........|........|.. // ..|........| |
1229 | * sect. 0 `296 `304 ^(512*8*8)-1 |
1230 | * |
1231 | #define BM_WORDS_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG ) |
1232 | #define BM_BYTES_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 ) // 128 |
1233 | #define BM_EXT_PER_SECT ( 512 / BM_BYTES_PER_EXTENT ) // 4 |
1234 | */ |
1235 | |
1236 | #define DRBD_MAX_SECTORS_32 (0xffffffffLU) |
1237 | /* we have a certain meta data variant that has a fixed on-disk size of 128 |
1238 | * MiB, of which 4k are our "superblock", and 32k are the fixed size activity |
1239 | * log, leaving this many sectors for the bitmap. |
1240 | */ |
1241 | |
1242 | #define DRBD_MAX_SECTORS_FIXED_BM \ |
1243 | ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9))) |
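/* Spelled out: MD_128MB_SECT == 262144, so 262144 - 64 - 8 == 262072
 * sectors remain for the bitmap; each 512 byte bitmap sector covers one
 * 16 MiB resync extent, i.e. 1<<15 storage sectors, hence the product
 * above, roughly 8.6e9 sectors. */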
1244 | #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM |
1245 | /* 16 TB in units of sectors */ |
1246 | #if BITS_PER_LONG == 32 |
1247 | /* adjust by one page worth of bitmap, |
1248 | * so we won't wrap around in drbd_bm_find_next_bit. |
1249 | * you should use 64bit OS for that much storage, anyways. */ |
1250 | #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff) |
1251 | #else |
1252 | /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */ |
1253 | #define DRBD_MAX_SECTORS_FLEX (1UL << 51) |
1254 | /* corresponds to (1UL << 38) bits right now. */ |
1255 | #endif |
1256 | |
1257 | /* Estimate max bio size as 256 * PAGE_SIZE, |
1258 | * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte. |
1259 | * Since we may live in a mixed-platform cluster, |
1260 | * we limit us to a platform agnostic constant here for now. |
1261 | * A followup commit may allow even bigger BIO sizes, |
1262 | * once we thought that through. */ |
1263 | #define DRBD_MAX_BIO_SIZE (1U << 20) |
1264 | #if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT) |
1265 | #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE |
1266 | #endif |
1267 | #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ |
1268 | |
1269 | #define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */ |
1270 | #define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */ |
1271 | |
1272 | /* For now, don't allow more than half of what we can "activate" in one |
1273 | * activity log transaction to be discarded in one go. We may need to rework |
1274 | * drbd_al_begin_io() to allow for even larger discard ranges */ |
1275 | #define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE) |
1276 | #define DRBD_MAX_BBIO_SECTORS (DRBD_MAX_BATCH_BIO_SIZE >> 9) |
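/* Spelled out: AL_UPDATES_PER_TRANSACTION/2 == 32 activity log extents
 * of AL_EXTENT_SIZE == 4 MiB each, i.e. a 128 MiB batch limit,
 * or 262144 512-byte sectors. */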
1277 | |
1278 | extern int drbd_bm_init(struct drbd_device *device); |
1279 | extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits); |
1280 | extern void drbd_bm_cleanup(struct drbd_device *device); |
1281 | extern void drbd_bm_set_all(struct drbd_device *device); |
1282 | extern void drbd_bm_clear_all(struct drbd_device *device); |
1283 | /* set/clear/test only a few bits at a time */ |
1284 | extern int drbd_bm_set_bits( |
1285 | struct drbd_device *device, unsigned long s, unsigned long e); |
1286 | extern int drbd_bm_clear_bits( |
1287 | struct drbd_device *device, unsigned long s, unsigned long e); |
1288 | extern int drbd_bm_count_bits( |
1289 | struct drbd_device *device, const unsigned long s, const unsigned long e); |
1290 | /* bm_set_bits variant for use while holding drbd_bm_lock, |
1291 | * may process the whole bitmap in one go */ |
1292 | extern void _drbd_bm_set_bits(struct drbd_device *device, |
1293 | const unsigned long s, const unsigned long e); |
1294 | extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr); |
1295 | extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr); |
1296 | extern int drbd_bm_read(struct drbd_device *device, |
1297 | struct drbd_peer_device *peer_device) __must_hold(local); |
1298 | extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr); |
1299 | extern int drbd_bm_write(struct drbd_device *device, |
1300 | struct drbd_peer_device *peer_device) __must_hold(local); |
1301 | extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local); |
1302 | extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local); |
1303 | extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local); |
1304 | extern int drbd_bm_write_all(struct drbd_device *device, |
1305 | struct drbd_peer_device *peer_device) __must_hold(local); |
1306 | extern int drbd_bm_write_copy_pages(struct drbd_device *device, |
1307 | struct drbd_peer_device *peer_device) __must_hold(local); |
1308 | extern size_t drbd_bm_words(struct drbd_device *device); |
1309 | extern unsigned long drbd_bm_bits(struct drbd_device *device); |
1310 | extern sector_t drbd_bm_capacity(struct drbd_device *device); |
1311 | |
1312 | #define DRBD_END_OF_BITMAP (~(unsigned long)0) |
1313 | extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo); |
1314 | /* bm_find_next variants for use while you hold drbd_bm_lock() */ |
1315 | extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo); |
1316 | extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo); |
1317 | extern unsigned long _drbd_bm_total_weight(struct drbd_device *device); |
1318 | extern unsigned long drbd_bm_total_weight(struct drbd_device *device); |
1319 | /* for receive_bitmap */ |
1320 | extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, |
1321 | size_t number, unsigned long *buffer); |
1322 | /* for _drbd_send_bitmap */ |
1323 | extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset, |
1324 | size_t number, unsigned long *buffer); |
1325 | |
1326 | extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags); |
1327 | extern void drbd_bm_unlock(struct drbd_device *device); |
1328 | /* drbd_main.c */ |
1329 | |
1330 | extern struct kmem_cache *drbd_request_cache; |
1331 | extern struct kmem_cache *drbd_ee_cache; /* peer requests */ |
1332 | extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */ |
1333 | extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ |
1334 | extern mempool_t drbd_request_mempool; |
1335 | extern mempool_t drbd_ee_mempool; |
1336 | |
1337 | /* drbd's page pool, used to buffer data received from the peer, |
1338 | * or data requested by the peer. |
1339 | * |
1340 | * This does not have an emergency reserve. |
1341 | * |
 * Allocations are served from the pool's pre-allocated pages first.
 * Only if the pool is depleted do we fall back to allocating from the system.
1344 | * |
1345 | * The assumption is that pages taken from this pool will be processed, |
1346 | * and given back, "quickly", and then can be recycled, so we can avoid |
1347 | * frequent calls to alloc_page(), and still will be able to make progress even |
1348 | * under memory pressure. |
1349 | */ |
1350 | extern struct page *drbd_pp_pool; |
1351 | extern spinlock_t drbd_pp_lock; |
1352 | extern int drbd_pp_vacant; |
1353 | extern wait_queue_head_t drbd_pp_wait; |
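
/* Minimal sketch, not the driver's actual allocator (see drbd_alloc_pages()
 * in drbd_receiver.c): take one page from the chained pool if available,
 * otherwise fall back to the system allocator, as described above. The
 * helper name is ours. */
static inline struct page *drbd_pp_example_get_one(gfp_t gfp_mask)
{
	struct page *page = NULL;

	spin_lock(&drbd_pp_lock);
	if (drbd_pp_pool) {
		page = drbd_pp_pool;
		/* pool pages are chained through their page_private() field */
		drbd_pp_pool = (struct page *)page_private(page);
		drbd_pp_vacant--;
	}
	spin_unlock(&drbd_pp_lock);
	if (!page)
		page = alloc_page(gfp_mask);
	return page;
}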
1354 | |
1355 | /* We also need a standard (emergency-reserve backed) page pool |
1356 | * for meta data IO (activity log, bitmap). |
1357 | * We can keep it global, as long as it is used as "N pages at a time". |
1358 | * 128 should be plenty, currently we probably can get away with as few as 1. |
1359 | */ |
1360 | #define DRBD_MIN_POOL_PAGES 128 |
1361 | extern mempool_t drbd_md_io_page_pool; |
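
/* Usage sketch (hypothetical helpers, not part of the driver): meta data IO
 * allocates from the reserve-backed mempool with GFP_NOIO, which may sleep
 * but is guaranteed to eventually succeed, and frees back to the same pool. */
static inline struct page *drbd_md_example_get_page(void)
{
	return mempool_alloc(&drbd_md_io_page_pool, GFP_NOIO);
}

static inline void drbd_md_example_put_page(struct page *page)
{
	mempool_free(page, &drbd_md_io_page_pool);
}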
1362 | |
1363 | /* We also need to make sure we get a bio |
1364 | * when we need it for housekeeping purposes */ |
1365 | extern struct bio_set drbd_md_io_bio_set; |
1366 | |
1367 | /* And a bio_set for cloning */ |
1368 | extern struct bio_set drbd_io_bio_set; |
1369 | |
1370 | extern struct mutex resources_mutex; |
1371 | |
1372 | extern int conn_lowest_minor(struct drbd_connection *connection); |
1373 | extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor); |
1374 | extern void drbd_destroy_device(struct kref *kref); |
1375 | extern void drbd_delete_device(struct drbd_device *device); |
1376 | |
1377 | extern struct drbd_resource *drbd_create_resource(const char *name); |
1378 | extern void drbd_free_resource(struct drbd_resource *resource); |
1379 | |
1380 | extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts); |
1381 | extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts); |
1382 | extern void drbd_destroy_connection(struct kref *kref); |
1383 | extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len, |
1384 | void *peer_addr, int peer_addr_len); |
1385 | extern struct drbd_resource *drbd_find_resource(const char *name); |
1386 | extern void drbd_destroy_resource(struct kref *kref); |
1387 | extern void conn_free_crypto(struct drbd_connection *connection); |
1388 | |
1389 | /* drbd_req */ |
1390 | extern void do_submit(struct work_struct *ws); |
1391 | extern void __drbd_make_request(struct drbd_device *, struct bio *); |
1392 | void drbd_submit_bio(struct bio *bio); |
1393 | extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); |
1394 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); |
1395 | |
1396 | |
1397 | /* drbd_nl.c */ |
1398 | |
1399 | extern struct mutex notification_mutex; |
1400 | |
1401 | extern void drbd_suspend_io(struct drbd_device *device); |
1402 | extern void drbd_resume_io(struct drbd_device *device); |
1403 | extern char *ppsize(char *buf, unsigned long long size); |
1404 | extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int); |
1405 | enum determine_dev_size { |
1406 | DS_ERROR_SHRINK = -3, |
1407 | DS_ERROR_SPACE_MD = -2, |
1408 | DS_ERROR = -1, |
1409 | DS_UNCHANGED = 0, |
1410 | DS_SHRUNK = 1, |
1411 | DS_GREW = 2, |
1412 | DS_GREW_FROM_ZERO = 3, |
1413 | }; |
1414 | extern enum determine_dev_size |
1415 | drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local); |
1416 | extern void resync_after_online_grow(struct drbd_device *); |
1417 | extern void drbd_reconsider_queue_parameters(struct drbd_device *device, |
1418 | struct drbd_backing_dev *bdev, struct o_qlim *o); |
1419 | extern enum drbd_state_rv drbd_set_role(struct drbd_device *device, |
1420 | enum drbd_role new_role, |
1421 | int force); |
1422 | extern bool conn_try_outdate_peer(struct drbd_connection *connection); |
1423 | extern void conn_try_outdate_peer_async(struct drbd_connection *connection); |
1424 | extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd); |
1425 | extern int drbd_khelper(struct drbd_device *device, char *cmd); |
1426 | |
1427 | /* drbd_worker.c */ |
1428 | /* bi_end_io handlers */ |
1429 | extern void drbd_md_endio(struct bio *bio); |
1430 | extern void drbd_peer_request_endio(struct bio *bio); |
1431 | extern void drbd_request_endio(struct bio *bio); |
1432 | extern int drbd_worker(struct drbd_thread *thi); |
1433 | enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor); |
1434 | void drbd_resync_after_changed(struct drbd_device *device); |
1435 | extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side); |
1436 | extern void resume_next_sg(struct drbd_device *device); |
1437 | extern void suspend_other_sg(struct drbd_device *device); |
1438 | extern int drbd_resync_finished(struct drbd_peer_device *peer_device); |
1439 | /* maybe rather drbd_main.c ? */ |
1440 | extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent); |
1441 | extern void drbd_md_put_buffer(struct drbd_device *device); |
1442 | extern int drbd_md_sync_page_io(struct drbd_device *device, |
1443 | struct drbd_backing_dev *bdev, sector_t sector, enum req_op op); |
1444 | extern void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device, |
1445 | sector_t sector, int size); |
1446 | extern void wait_until_done_or_force_detached(struct drbd_device *device, |
1447 | struct drbd_backing_dev *bdev, unsigned int *done); |
1448 | extern void drbd_rs_controller_reset(struct drbd_peer_device *peer_device); |
1449 | |
1450 | static inline void ov_out_of_sync_print(struct drbd_peer_device *peer_device) |
1451 | { |
1452 | struct drbd_device *device = peer_device->device; |
1453 | |
1454 | if (device->ov_last_oos_size) { |
1455 | drbd_err(peer_device, "Out of sync: start=%llu, size=%lu (sectors)\n" , |
1456 | (unsigned long long)device->ov_last_oos_start, |
1457 | (unsigned long)device->ov_last_oos_size); |
1458 | } |
1459 | device->ov_last_oos_size = 0; |
1460 | } |
1461 | |
1462 | |
1463 | extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *); |
1464 | extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *, |
1465 | void *); |
1466 | /* worker callbacks */ |
1467 | extern int w_e_end_data_req(struct drbd_work *, int); |
1468 | extern int w_e_end_rsdata_req(struct drbd_work *, int); |
1469 | extern int w_e_end_csum_rs_req(struct drbd_work *, int); |
1470 | extern int w_e_end_ov_reply(struct drbd_work *, int); |
1471 | extern int w_e_end_ov_req(struct drbd_work *, int); |
1472 | extern int w_ov_finished(struct drbd_work *, int); |
1473 | extern int w_resync_timer(struct drbd_work *, int); |
1474 | extern int w_send_write_hint(struct drbd_work *, int); |
1475 | extern int w_send_dblock(struct drbd_work *, int); |
1476 | extern int w_send_read_req(struct drbd_work *, int); |
1477 | extern int w_e_reissue(struct drbd_work *, int); |
1478 | extern int w_restart_disk_io(struct drbd_work *, int); |
1479 | extern int w_send_out_of_sync(struct drbd_work *, int); |
1480 | |
1481 | extern void resync_timer_fn(struct timer_list *t); |
1482 | extern void start_resync_timer_fn(struct timer_list *t); |
1483 | |
1484 | extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req); |
1485 | |
1486 | /* drbd_receiver.c */ |
1487 | extern int drbd_issue_discard_or_zero_out(struct drbd_device *device, |
1488 | sector_t start, unsigned int nr_sectors, int flags); |
1489 | extern int drbd_receiver(struct drbd_thread *thi); |
1490 | extern int drbd_ack_receiver(struct drbd_thread *thi); |
1491 | extern void drbd_send_ping_wf(struct work_struct *ws); |
1492 | extern void drbd_send_acks_wf(struct work_struct *ws); |
1493 | extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device); |
1494 | extern bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector, |
1495 | bool throttle_if_app_is_waiting); |
1496 | extern int drbd_submit_peer_request(struct drbd_peer_request *peer_req); |
1497 | extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); |
1498 | extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, |
1499 | sector_t, unsigned int, |
1500 | unsigned int, |
1501 | gfp_t) __must_hold(local); |
1502 | extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *, |
1503 | int); |
1504 | #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0) |
1505 | #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1) |
1506 | extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool); |
1507 | extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled); |
1508 | extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed); |
1509 | extern int drbd_connected(struct drbd_peer_device *); |
1510 | |
1511 | /* sets the number of 512 byte sectors of our virtual device */ |
1512 | void drbd_set_my_capacity(struct drbd_device *device, sector_t size); |
1513 | |
1514 | /* |
1515 | * used to submit our private bio |
1516 | */ |
1517 | static inline void drbd_submit_bio_noacct(struct drbd_device *device, |
1518 | int fault_type, struct bio *bio) |
1519 | { |
1520 | __release(local); |
1521 | if (!bio->bi_bdev) { |
1522 | drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n" ); |
1523 | bio->bi_status = BLK_STS_IOERR; |
1524 | bio_endio(bio); |
1525 | return; |
1526 | } |
1527 | |
	if (drbd_insert_fault(device, fault_type))
1529 | bio_io_error(bio); |
1530 | else |
1531 | submit_bio_noacct(bio); |
1532 | } |
1533 | |
1534 | void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev, |
1535 | enum write_ordering_e wo); |
1536 | |
1537 | /* drbd_proc.c */ |
1538 | extern struct proc_dir_entry *drbd_proc; |
1539 | int drbd_seq_show(struct seq_file *seq, void *v); |
1540 | |
1541 | /* drbd_actlog.c */ |
1542 | extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i); |
1543 | extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i); |
1544 | extern void drbd_al_begin_io_commit(struct drbd_device *device); |
1545 | extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i); |
1546 | extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i); |
1547 | extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i); |
1548 | extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector); |
1549 | extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector); |
1550 | extern int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector); |
1551 | extern void drbd_rs_cancel_all(struct drbd_device *device); |
1552 | extern int drbd_rs_del_all(struct drbd_device *device); |
1553 | extern void drbd_rs_failed_io(struct drbd_peer_device *peer_device, |
1554 | sector_t sector, int size); |
1555 | extern void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go); |
1556 | |
1557 | enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC }; |
1558 | extern int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size, |
1559 | enum update_sync_bits_mode mode); |
1560 | #define drbd_set_in_sync(peer_device, sector, size) \ |
1561 | __drbd_change_sync(peer_device, sector, size, SET_IN_SYNC) |
1562 | #define drbd_set_out_of_sync(peer_device, sector, size) \ |
1563 | __drbd_change_sync(peer_device, sector, size, SET_OUT_OF_SYNC) |
1564 | #define drbd_rs_failed_io(peer_device, sector, size) \ |
1565 | __drbd_change_sync(peer_device, sector, size, RECORD_RS_FAILED) |
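
/* Usage sketch (hypothetical wrapper): sector is a 512-byte sector number
 * and size is a byte count (512-byte aligned); a typical caller marks a
 * request's interval for later resync like this. */
static inline void drbd_example_mark_out_of_sync(struct drbd_peer_device *peer_device,
						 sector_t sector, int size)
{
	drbd_set_out_of_sync(peer_device, sector, size);
}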
1566 | extern void drbd_al_shrink(struct drbd_device *device); |
1567 | extern int drbd_al_initialize(struct drbd_device *, void *); |
1568 | |
1569 | /* drbd_nl.c */ |
1570 | /* state info broadcast */ |
1571 | struct sib_info { |
1572 | enum drbd_state_info_bcast_reason sib_reason; |
1573 | union { |
1574 | struct { |
1575 | char *helper_name; |
1576 | unsigned helper_exit_code; |
1577 | }; |
1578 | struct { |
1579 | union drbd_state os; |
1580 | union drbd_state ns; |
1581 | }; |
1582 | }; |
1583 | }; |
1584 | void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib); |
1585 | |
1586 | extern int notify_resource_state(struct sk_buff *, |
1587 | unsigned int, |
1588 | struct drbd_resource *, |
1589 | struct resource_info *, |
1590 | enum drbd_notification_type); |
1591 | extern int notify_device_state(struct sk_buff *, |
1592 | unsigned int, |
1593 | struct drbd_device *, |
1594 | struct device_info *, |
1595 | enum drbd_notification_type); |
1596 | extern int notify_connection_state(struct sk_buff *, |
1597 | unsigned int, |
1598 | struct drbd_connection *, |
1599 | struct connection_info *, |
1600 | enum drbd_notification_type); |
1601 | extern int notify_peer_device_state(struct sk_buff *, |
1602 | unsigned int, |
1603 | struct drbd_peer_device *, |
1604 | struct peer_device_info *, |
1605 | enum drbd_notification_type); |
1606 | extern void notify_helper(enum drbd_notification_type, struct drbd_device *, |
1607 | struct drbd_connection *, const char *, int); |
1608 | |
1609 | /* |
1610 | * inline helper functions |
1611 | *************************/ |
1612 | |
1613 | /* see also page_chain_add and friends in drbd_receiver.c */ |
1614 | static inline struct page *page_chain_next(struct page *page) |
1615 | { |
1616 | return (struct page *)page_private(page); |
1617 | } |
1618 | #define page_chain_for_each(page) \ |
1619 | for (; page && ({ prefetch(page_chain_next(page)); 1; }); \ |
1620 | page = page_chain_next(page)) |
1621 | #define page_chain_for_each_safe(page, n) \ |
1622 | for (; page && ({ n = page_chain_next(page); 1; }); page = n) |
1623 | |
1624 | |
1625 | static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req) |
1626 | { |
1627 | struct page *page = peer_req->pages; |
1628 | page_chain_for_each(page) { |
1629 | if (page_count(page) > 1) |
1630 | return 1; |
1631 | } |
1632 | return 0; |
1633 | } |
1634 | |
1635 | static inline union drbd_state drbd_read_state(struct drbd_device *device) |
1636 | { |
1637 | struct drbd_resource *resource = device->resource; |
1638 | union drbd_state rv; |
1639 | |
1640 | rv.i = device->state.i; |
1641 | rv.susp = resource->susp; |
1642 | rv.susp_nod = resource->susp_nod; |
1643 | rv.susp_fen = resource->susp_fen; |
1644 | |
1645 | return rv; |
1646 | } |
1647 | |
1648 | enum drbd_force_detach_flags { |
1649 | DRBD_READ_ERROR, |
1650 | DRBD_WRITE_ERROR, |
1651 | DRBD_META_IO_ERROR, |
1652 | DRBD_FORCE_DETACH, |
1653 | }; |
1654 | |
1655 | #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__) |
1656 | static inline void __drbd_chk_io_error_(struct drbd_device *device, |
1657 | enum drbd_force_detach_flags df, |
1658 | const char *where) |
1659 | { |
1660 | enum drbd_io_error_p ep; |
1661 | |
1662 | rcu_read_lock(); |
1663 | ep = rcu_dereference(device->ldev->disk_conf)->on_io_error; |
1664 | rcu_read_unlock(); |
1665 | switch (ep) { |
1666 | case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */ |
1667 | if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) { |
1668 | if (drbd_ratelimit()) |
1669 | drbd_err(device, "Local IO failed in %s.\n" , where); |
1670 | if (device->state.disk > D_INCONSISTENT) |
1671 | _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL); |
1672 | break; |
1673 | } |
1674 | fallthrough; /* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */ |
1675 | case EP_DETACH: |
1676 | case EP_CALL_HELPER: |
1677 | /* Remember whether we saw a READ or WRITE error. |
1678 | * |
1679 | * Recovery of the affected area for WRITE failure is covered |
1680 | * by the activity log. |
1681 | * READ errors may fall outside that area though. Certain READ |
1682 | * errors can be "healed" by writing good data to the affected |
1683 | * blocks, which triggers block re-allocation in lower layers. |
1684 | * |
1685 | * If we can not write the bitmap after a READ error, |
1686 | * we may need to trigger a full sync (see w_go_diskless()). |
1687 | * |
1688 | * Force-detach is not really an IO error, but rather a |
1689 | * desperate measure to try to deal with a completely |
1690 | * unresponsive lower level IO stack. |
1691 | * Still it should be treated as a WRITE error. |
1692 | * |
1693 | * Meta IO error is always WRITE error: |
1694 | * we read meta data only once during attach, |
1695 | * which will fail in case of errors. |
1696 | */ |
		set_bit(WAS_IO_ERROR, &device->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &device->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &device->flags);
		if (device->state.disk > D_FAILED) {
			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
			drbd_err(device,
				"Local IO failed in %s. Detaching...\n", where);
1706 | } |
1707 | break; |
1708 | } |
1709 | } |
1710 | |
1711 | /** |
 * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all io completion handlers
1713 | * @device: DRBD device. |
1714 | * @error: Error code passed to the IO completion callback |
1715 | * @forcedetach: Force detach. I.e. the error happened while accessing the meta data |
1716 | * |
1717 | * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED) |
1718 | */ |
1719 | #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__) |
1720 | static inline void drbd_chk_io_error_(struct drbd_device *device, |
1721 | int error, enum drbd_force_detach_flags forcedetach, const char *where) |
1722 | { |
1723 | if (error) { |
1724 | unsigned long flags; |
1725 | spin_lock_irqsave(&device->resource->req_lock, flags); |
		__drbd_chk_io_error_(device, forcedetach, where);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
1728 | } |
1729 | } |
1730 | |
1731 | |
1732 | /** |
1733 | * drbd_md_first_sector() - Returns the first sector number of the meta data area |
1734 | * @bdev: Meta data block device. |
1735 | * |
1736 | * BTW, for internal meta data, this happens to be the maximum capacity |
1737 | * we could agree upon with our peer node. |
1738 | */ |
1739 | static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev) |
1740 | { |
1741 | switch (bdev->md.meta_dev_idx) { |
1742 | case DRBD_MD_INDEX_INTERNAL: |
1743 | case DRBD_MD_INDEX_FLEX_INT: |
1744 | return bdev->md.md_offset + bdev->md.bm_offset; |
1745 | case DRBD_MD_INDEX_FLEX_EXT: |
1746 | default: |
1747 | return bdev->md.md_offset; |
1748 | } |
1749 | } |
1750 | |
1751 | /** |
1752 | * drbd_md_last_sector() - Return the last sector number of the meta data area |
1753 | * @bdev: Meta data block device. |
1754 | */ |
1755 | static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev) |
1756 | { |
1757 | switch (bdev->md.meta_dev_idx) { |
1758 | case DRBD_MD_INDEX_INTERNAL: |
1759 | case DRBD_MD_INDEX_FLEX_INT: |
		return bdev->md.md_offset + MD_4kB_SECT - 1;
1761 | case DRBD_MD_INDEX_FLEX_EXT: |
1762 | default: |
		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
1764 | } |
1765 | } |
1766 | |
1767 | /* Returns the number of 512 byte sectors of the device */ |
1768 | static inline sector_t drbd_get_capacity(struct block_device *bdev) |
1769 | { |
1770 | return bdev ? bdev_nr_sectors(bdev) : 0; |
1771 | } |
1772 | |
1773 | /** |
 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1775 | * @bdev: Meta data block device. |
1776 | * |
 * Returns the capacity we announce to our peer. We clip ourselves at the
 * various MAX_SECTORS, because if we don't, the current implementation will
 * oops sooner or later.
1780 | */ |
1781 | static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev) |
1782 | { |
1783 | sector_t s; |
1784 | |
1785 | switch (bdev->md.meta_dev_idx) { |
1786 | case DRBD_MD_INDEX_INTERNAL: |
1787 | case DRBD_MD_INDEX_FLEX_INT: |
		s = drbd_get_capacity(bdev->backing_bdev)
1789 | ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX, |
1790 | drbd_md_first_sector(bdev)) |
1791 | : 0; |
1792 | break; |
1793 | case DRBD_MD_INDEX_FLEX_EXT: |
1794 | s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX, |
1795 | drbd_get_capacity(bdev->backing_bdev)); |
1796 | /* clip at maximum size the meta device can support */ |
1797 | s = min_t(sector_t, s, |
1798 | BM_EXT_TO_SECT(bdev->md.md_size_sect |
1799 | - bdev->md.bm_offset)); |
1800 | break; |
1801 | default: |
1802 | s = min_t(sector_t, DRBD_MAX_SECTORS, |
1803 | drbd_get_capacity(bdev->backing_bdev)); |
1804 | } |
1805 | return s; |
1806 | } |
1807 | |
1808 | /** |
1809 | * drbd_md_ss() - Return the sector number of our meta data super block |
1810 | * @bdev: Meta data block device. |
1811 | */ |
1812 | static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev) |
1813 | { |
1814 | const int meta_dev_idx = bdev->md.meta_dev_idx; |
1815 | |
1816 | if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT) |
1817 | return 0; |
1818 | |
1819 | /* Since drbd08, internal meta data is always "flexible". |
1820 | * position: last 4k aligned block of 4k size */ |
1821 | if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL || |
1822 | meta_dev_idx == DRBD_MD_INDEX_FLEX_INT) |
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1824 | |
1825 | /* external, some index; this is the old fixed size layout */ |
1826 | return MD_128MB_SECT * bdev->md.meta_dev_idx; |
1827 | } |
1828 | |
1829 | static inline void |
1830 | drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w) |
1831 | { |
1832 | unsigned long flags; |
1833 | spin_lock_irqsave(&q->q_lock, flags); |
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
1836 | wake_up(&q->q_wait); |
1837 | } |
1838 | |
1839 | static inline void |
1840 | drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w) |
1841 | { |
1842 | unsigned long flags; |
1843 | spin_lock_irqsave(&q->q_lock, flags); |
	if (list_empty_careful(&w->list))
		list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
1847 | wake_up(&q->q_wait); |
1848 | } |
1849 | |
1850 | static inline void |
1851 | drbd_device_post_work(struct drbd_device *device, int work_bit) |
1852 | { |
	if (!test_and_set_bit(work_bit, &device->flags)) {
1854 | struct drbd_connection *connection = |
1855 | first_peer_device(device)->connection; |
1856 | struct drbd_work_queue *q = &connection->sender_work; |
		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1858 | wake_up(&q->q_wait); |
1859 | } |
1860 | } |
1861 | |
1862 | extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue); |
1863 | |
1864 | /* To get the ack_receiver out of the blocking network stack, |
1865 | * so it can change its sk_rcvtimeo from idle- to ping-timeout, |
1866 | * and send a ping, we need to send a signal. |
1867 | * Which signal we send is irrelevant. */ |
1868 | static inline void wake_ack_receiver(struct drbd_connection *connection) |
1869 | { |
1870 | struct task_struct *task = connection->ack_receiver.task; |
	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1872 | send_sig(SIGXCPU, task, 1); |
1873 | } |
1874 | |
1875 | static inline void request_ping(struct drbd_connection *connection) |
1876 | { |
	set_bit(SEND_PING, &connection->flags);
1878 | wake_ack_receiver(connection); |
1879 | } |
1880 | |
1881 | extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *); |
1882 | extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *); |
1883 | extern int conn_send_command(struct drbd_connection *, struct drbd_socket *, |
1884 | enum drbd_packet, unsigned int, void *, |
1885 | unsigned int); |
1886 | extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *, |
1887 | enum drbd_packet, unsigned int, void *, |
1888 | unsigned int); |
1889 | |
1890 | extern int drbd_send_ping(struct drbd_connection *connection); |
1891 | extern int drbd_send_ping_ack(struct drbd_connection *connection); |
1892 | extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state); |
1893 | extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state); |
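
/* Sketch of the prepare/send pairing these declarations imply, modeled on
 * drbd_send_ping() in drbd_main.c: conn_prepare_command() reserves the
 * socket and returns a pointer to the payload buffer (NULL on failure);
 * conn_send_command() fills in the packet header and transmits. */
static inline int drbd_example_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock = &connection->meta;

	if (!conn_prepare_command(connection, sock))
		return -EIO;
	/* P_PING carries no payload beyond its header */
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}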
1894 | |
1895 | static inline void drbd_thread_stop(struct drbd_thread *thi) |
1896 | { |
	_drbd_thread_stop(thi, false, true);
1898 | } |
1899 | |
1900 | static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) |
1901 | { |
	_drbd_thread_stop(thi, false, false);
1903 | } |
1904 | |
1905 | static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) |
1906 | { |
	_drbd_thread_stop(thi, true, false);
1908 | } |
1909 | |
/* counts how many answer packets we expect from our peer,
1911 | * for either explicit application requests, |
1912 | * or implicit barrier packets as necessary. |
1913 | * increased: |
1914 | * w_send_barrier |
1915 | * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ); |
1916 | * it is much easier and equally valid to count what we queue for the |
 * worker, even before it actually was queued or sent.
1918 | * (drbd_make_request_common; recovery path on read io-error) |
1919 | * decreased: |
1920 | * got_BarrierAck (respective tl_clear, tl_clear_barrier) |
1921 | * _req_mod(req, DATA_RECEIVED) |
1922 | * [from receive_DataReply] |
1923 | * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED) |
1924 | * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)] |
1925 | * for some reason it is NOT decreased in got_NegAck, |
1926 | * but in the resulting cleanup code from report_params. |
1927 | * we should try to remember the reason for that... |
1928 | * _req_mod(req, SEND_FAILED or SEND_CANCELED) |
1929 | * _req_mod(req, CONNECTION_LOST_WHILE_PENDING) |
1930 | * [from tl_clear_barrier] |
1931 | */ |
1932 | static inline void inc_ap_pending(struct drbd_device *device) |
1933 | { |
	atomic_inc(&device->ap_pending_cnt);
1935 | } |
1936 | |
1937 | #define dec_ap_pending(device) ((void)expect((device), __dec_ap_pending(device) >= 0)) |
1938 | static inline int __dec_ap_pending(struct drbd_device *device) |
1939 | { |
	int ap_pending_cnt = atomic_dec_return(&device->ap_pending_cnt);
1941 | |
1942 | if (ap_pending_cnt == 0) |
1943 | wake_up(&device->misc_wait); |
1944 | return ap_pending_cnt; |
1945 | } |
1946 | |
1947 | /* counts how many resync-related answers we still expect from the peer |
1948 | * increase decrease |
1949 | * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY) |
1950 | * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER) |
1951 | * (or P_NEG_ACK with ID_SYNCER) |
1952 | */ |
1953 | static inline void inc_rs_pending(struct drbd_peer_device *peer_device) |
1954 | { |
	atomic_inc(&peer_device->device->rs_pending_cnt);
1956 | } |
1957 | |
1958 | #define dec_rs_pending(peer_device) \ |
1959 | ((void)expect((peer_device), __dec_rs_pending(peer_device) >= 0)) |
1960 | static inline int __dec_rs_pending(struct drbd_peer_device *peer_device) |
1961 | { |
	return atomic_dec_return(&peer_device->device->rs_pending_cnt);
1963 | } |
1964 | |
1965 | /* counts how many answers we still need to send to the peer. |
1966 | * increased on |
1967 | * receive_Data unless protocol A; |
1968 | * we need to send a P_RECV_ACK (proto B) |
1969 | * or P_WRITE_ACK (proto C) |
1970 | * receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK |
1971 | * receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA |
1972 | * receive_Barrier_* we need to send a P_BARRIER_ACK |
1973 | */ |
1974 | static inline void inc_unacked(struct drbd_device *device) |
1975 | { |
	atomic_inc(&device->unacked_cnt);
1977 | } |
1978 | |
1979 | #define dec_unacked(device) ((void)expect(device, __dec_unacked(device) >= 0)) |
1980 | static inline int __dec_unacked(struct drbd_device *device) |
1981 | { |
	return atomic_dec_return(&device->unacked_cnt);
1983 | } |
1984 | |
#define sub_unacked(device, n) ((void)expect(device, __sub_unacked(device, n) >= 0))
1986 | static inline int __sub_unacked(struct drbd_device *device, int n) |
1987 | { |
	return atomic_sub_return(n, &device->unacked_cnt);
1989 | } |
1990 | |
1991 | static inline bool is_sync_target_state(enum drbd_conns connection_state) |
1992 | { |
1993 | return connection_state == C_SYNC_TARGET || |
1994 | connection_state == C_PAUSED_SYNC_T; |
1995 | } |
1996 | |
1997 | static inline bool is_sync_source_state(enum drbd_conns connection_state) |
1998 | { |
1999 | return connection_state == C_SYNC_SOURCE || |
2000 | connection_state == C_PAUSED_SYNC_S; |
2001 | } |
2002 | |
2003 | static inline bool is_sync_state(enum drbd_conns connection_state) |
2004 | { |
2005 | return is_sync_source_state(connection_state) || |
2006 | is_sync_target_state(connection_state); |
2007 | } |
2008 | |
2009 | /** |
2010 | * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev |
2011 | * @_device: DRBD device. |
2012 | * @_min_state: Minimum device state required for success. |
2013 | * |
2014 | * You have to call put_ldev() when finished working with device->ldev. |
2015 | */ |
2016 | #define get_ldev_if_state(_device, _min_state) \ |
2017 | (_get_ldev_if_state((_device), (_min_state)) ? \ |
2018 | ({ __acquire(x); true; }) : false) |
2019 | #define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT) |
2020 | |
2021 | static inline void put_ldev(struct drbd_device *device) |
2022 | { |
2023 | enum drbd_disk_state disk_state = device->state.disk; |
2024 | /* We must check the state *before* the atomic_dec becomes visible, |
	 * or we have a theoretical race where a task hitting zero while the
	 * state is still D_FAILED would then see D_DISKLESS in the condition
	 * below and call into destroy, which it must not do yet. */
	int i = atomic_dec_return(&device->local_cnt);
2029 | |
2030 | /* This may be called from some endio handler, |
2031 | * so we must not sleep here. */ |
2032 | |
2033 | __release(local); |
2034 | D_ASSERT(device, i >= 0); |
2035 | if (i == 0) { |
2036 | if (disk_state == D_DISKLESS) |
2037 | /* even internal references gone, safe to destroy */ |
			drbd_device_post_work(device, DESTROY_DISK);
		if (disk_state == D_FAILED)
			/* all application IO references gone. */
			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
				drbd_device_post_work(device, GO_DISKLESS);
2043 | wake_up(&device->misc_wait); |
2044 | } |
2045 | } |
2046 | |
2047 | #ifndef __CHECKER__ |
2048 | static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins) |
2049 | { |
2050 | int io_allowed; |
2051 | |
2052 | /* never get a reference while D_DISKLESS */ |
2053 | if (device->state.disk == D_DISKLESS) |
2054 | return 0; |
2055 | |
	atomic_inc(&device->local_cnt);
2057 | io_allowed = (device->state.disk >= mins); |
2058 | if (!io_allowed) |
2059 | put_ldev(device); |
2060 | return io_allowed; |
2061 | } |
2062 | #else |
2063 | extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins); |
2064 | #endif |
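
/* Canonical usage sketch (hypothetical helper): every successful get_ldev()
 * must be paired with exactly one put_ldev() once the reference to
 * device->ldev is no longer needed. */
static inline sector_t drbd_example_md_offset(struct drbd_device *device)
{
	sector_t offset = 0;

	if (get_ldev(device)) {
		offset = device->ldev->md.md_offset;
		put_ldev(device);
	}
	return offset;
}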
2065 | |
2066 | /* this throttles on-the-fly application requests |
2067 | * according to max_buffers settings; |
2068 | * maybe re-implement using semaphores? */ |
2069 | static inline int drbd_get_max_buffers(struct drbd_device *device) |
2070 | { |
2071 | struct net_conf *nc; |
2072 | int mxb; |
2073 | |
2074 | rcu_read_lock(); |
2075 | nc = rcu_dereference(first_peer_device(device)->connection->net_conf); |
2076 | mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */ |
2077 | rcu_read_unlock(); |
2078 | |
2079 | return mxb; |
2080 | } |
2081 | |
2082 | static inline int drbd_state_is_stable(struct drbd_device *device) |
2083 | { |
2084 | union drbd_dev_state s = device->state; |
2085 | |
2086 | /* DO NOT add a default clause, we want the compiler to warn us |
2087 | * for any newly introduced state we may have forgotten to add here */ |
2088 | |
2089 | switch ((enum drbd_conns)s.conn) { |
2090 | /* new io only accepted when there is no connection, ... */ |
2091 | case C_STANDALONE: |
2092 | case C_WF_CONNECTION: |
2093 | /* ... or there is a well established connection. */ |
2094 | case C_CONNECTED: |
2095 | case C_SYNC_SOURCE: |
2096 | case C_SYNC_TARGET: |
2097 | case C_VERIFY_S: |
2098 | case C_VERIFY_T: |
2099 | case C_PAUSED_SYNC_S: |
2100 | case C_PAUSED_SYNC_T: |
2101 | case C_AHEAD: |
2102 | case C_BEHIND: |
2103 | /* transitional states, IO allowed */ |
2104 | case C_DISCONNECTING: |
2105 | case C_UNCONNECTED: |
2106 | case C_TIMEOUT: |
2107 | case C_BROKEN_PIPE: |
2108 | case C_NETWORK_FAILURE: |
2109 | case C_PROTOCOL_ERROR: |
2110 | case C_TEAR_DOWN: |
2111 | case C_WF_REPORT_PARAMS: |
2112 | case C_STARTING_SYNC_S: |
2113 | case C_STARTING_SYNC_T: |
2114 | break; |
2115 | |
2116 | /* Allow IO in BM exchange states with new protocols */ |
2117 | case C_WF_BITMAP_S: |
2118 | if (first_peer_device(device)->connection->agreed_pro_version < 96) |
2119 | return 0; |
2120 | break; |
2121 | |
2122 | /* no new io accepted in these states */ |
2123 | case C_WF_BITMAP_T: |
2124 | case C_WF_SYNC_UUID: |
2125 | case C_MASK: |
2126 | /* not "stable" */ |
2127 | return 0; |
2128 | } |
2129 | |
2130 | switch ((enum drbd_disk_state)s.disk) { |
2131 | case D_DISKLESS: |
2132 | case D_INCONSISTENT: |
2133 | case D_OUTDATED: |
2134 | case D_CONSISTENT: |
2135 | case D_UP_TO_DATE: |
2136 | case D_FAILED: |
2137 | /* disk state is stable as well. */ |
2138 | break; |
2139 | |
2140 | /* no new io accepted during transitional states */ |
2141 | case D_ATTACHING: |
2142 | case D_NEGOTIATING: |
2143 | case D_UNKNOWN: |
2144 | case D_MASK: |
2145 | /* not "stable" */ |
2146 | return 0; |
2147 | } |
2148 | |
2149 | return 1; |
2150 | } |
2151 | |
2152 | static inline int drbd_suspended(struct drbd_device *device) |
2153 | { |
2154 | struct drbd_resource *resource = device->resource; |
2155 | |
2156 | return resource->susp || resource->susp_fen || resource->susp_nod; |
2157 | } |
2158 | |
2159 | static inline bool may_inc_ap_bio(struct drbd_device *device) |
2160 | { |
2161 | int mxb = drbd_get_max_buffers(device); |
2162 | |
2163 | if (drbd_suspended(device)) |
2164 | return false; |
	if (atomic_read(&device->suspend_cnt))
2166 | return false; |
2167 | |
2168 | /* to avoid potential deadlock or bitmap corruption, |
2169 | * in various places, we only allow new application io |
2170 | * to start during "stable" states. */ |
2171 | |
2172 | /* no new io accepted when attaching or detaching the disk */ |
2173 | if (!drbd_state_is_stable(device)) |
2174 | return false; |
2175 | |
	/* since some older kernels don't have atomic_add_unless,
	 * and we are within the spinlock anyway, we have this workaround. */
	if (atomic_read(&device->ap_bio_cnt) > mxb)
2179 | return false; |
2180 | if (test_bit(BITMAP_IO, &device->flags)) |
2181 | return false; |
2182 | return true; |
2183 | } |
2184 | |
2185 | static inline bool inc_ap_bio_cond(struct drbd_device *device) |
2186 | { |
2187 | bool rv = false; |
2188 | |
	spin_lock_irq(&device->resource->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&device->resource->req_lock);
2194 | |
2195 | return rv; |
2196 | } |
2197 | |
2198 | static inline void inc_ap_bio(struct drbd_device *device) |
2199 | { |
	/* we wait here
	 *  - as long as the device is suspended,
	 *  - until the bitmap is no longer on the fly during connection
	 *    handshake,
	 *  - as long as we would exceed the max_buffer limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */
2207 | |
2208 | wait_event(device->misc_wait, inc_ap_bio_cond(device)); |
2209 | } |
2210 | |
2211 | static inline void dec_ap_bio(struct drbd_device *device) |
2212 | { |
2213 | int mxb = drbd_get_max_buffers(device); |
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2215 | |
2216 | D_ASSERT(device, ap_bio >= 0); |
2217 | |
2218 | if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) { |
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->
					connection->sender_work,
					&device->bm_io_work.w);
2223 | } |
2224 | |
2225 | /* this currently does wake_up for every dec_ap_bio! |
2226 | * maybe rather introduce some type of hysteresis? |
2227 | * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */ |
2228 | if (ap_bio < mxb) |
2229 | wake_up(&device->misc_wait); |
2230 | } |
2231 | |
2232 | static inline bool verify_can_do_stop_sector(struct drbd_device *device) |
2233 | { |
2234 | return first_peer_device(device)->connection->agreed_pro_version >= 97 && |
2235 | first_peer_device(device)->connection->agreed_pro_version != 100; |
2236 | } |
2237 | |
2238 | static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val) |
2239 | { |
2240 | int changed = device->ed_uuid != val; |
2241 | device->ed_uuid = val; |
2242 | return changed; |
2243 | } |
2244 | |
2245 | static inline int drbd_queue_order_type(struct drbd_device *device) |
2246 | { |
2247 | /* sorry, we currently have no working implementation |
2248 | * of distributed TCQ stuff */ |
2249 | #ifndef QUEUE_ORDERED_NONE |
2250 | #define QUEUE_ORDERED_NONE 0 |
2251 | #endif |
2252 | return QUEUE_ORDERED_NONE; |
2253 | } |
2254 | |
2255 | static inline struct drbd_connection *first_connection(struct drbd_resource *resource) |
2256 | { |
2257 | return list_first_entry_or_null(&resource->connections, |
2258 | struct drbd_connection, connections); |
2259 | } |
2260 | |
2261 | #endif |
2262 | |