/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_UTIL_H
#define _BCACHEFS_UTIL_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "mean_and_variance.h"

#include "darray.h"
#include "time_stats.h"

struct closure;

#ifdef CONFIG_BCACHEFS_DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)
#endif

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CPU_BIG_ENDIAN		0
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define CPU_BIG_ENDIAN		1
#endif

/* type hackery */

#define type_is_exact(_val, _type)					\
	__builtin_types_compatible_p(typeof(_val), _type)

#define type_is(_val, _type)						\
	(__builtin_types_compatible_p(typeof(_val), _type) ||		\
	 __builtin_types_compatible_p(typeof(_val), const _type))

/* Userspace doesn't align allocations as nicely as the kernel allocators: */
static inline size_t buf_pages(void *p, size_t len)
{
	return DIV_ROUND_UP(len +
			    ((unsigned long) p & (PAGE_SIZE - 1)),
			    PAGE_SIZE);
}
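
/*
 * Illustrative sketch (not part of the API): with 4 KiB pages, a 32 byte
 * buffer starting 16 bytes before a page boundary straddles two pages:
 *
 *	void *p = ...;				// (unsigned long) p % PAGE_SIZE == 0xff0
 *	size_t pages = buf_pages(p, 32);	// (32 + 0xff0) / 4096 rounds up to 2
 */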

#define HEAP(type)							\
struct {								\
	size_t size, used;						\
	type *data;							\
}

#define DECLARE_HEAP(type, name) HEAP(type) name

#define init_heap(heap, _size, gfp)					\
({									\
	(heap)->used = 0;						\
	(heap)->size = (_size);						\
	(heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\
				(gfp));					\
})

#define free_heap(heap)							\
do {									\
	kvfree((heap)->data);						\
	(heap)->data = NULL;						\
} while (0)

#define heap_set_backpointer(h, i, _fn)					\
do {									\
	void (*fn)(typeof(h), size_t) = _fn;				\
	if (fn)								\
		fn(h, i);						\
} while (0)

#define heap_swap(h, i, j, set_backpointer)				\
do {									\
	swap((h)->data[i], (h)->data[j]);				\
	heap_set_backpointer(h, i, set_backpointer);			\
	heap_set_backpointer(h, j, set_backpointer);			\
} while (0)

#define heap_peek(h)							\
({									\
	EBUG_ON(!(h)->used);						\
	(h)->data[0];							\
})

#define heap_full(h)		((h)->used == (h)->size)

#define heap_sift_down(h, i, cmp, set_backpointer)			\
do {									\
	size_t _c, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _c) {			\
		_c = _j * 2 + 1;					\
		if (_c + 1 < (h)->used &&				\
		    cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0)	\
			_c++;						\
									\
		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)		\
			break;						\
		heap_swap(h, _c, _j, set_backpointer);			\
	}								\
} while (0)

#define heap_sift_up(h, i, cmp, set_backpointer)			\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;				\
		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)		\
			break;						\
		heap_swap(h, i, p, set_backpointer);			\
		i = p;							\
	}								\
} while (0)

#define __heap_add(h, d, cmp, set_backpointer)				\
({									\
	size_t _i = (h)->used++;					\
	(h)->data[_i] = d;						\
	heap_set_backpointer(h, _i, set_backpointer);			\
									\
	heap_sift_up(h, _i, cmp, set_backpointer);			\
	_i;								\
})

#define heap_add(h, d, cmp, set_backpointer)				\
({									\
	bool _r = !heap_full(h);					\
	if (_r)								\
		__heap_add(h, d, cmp, set_backpointer);			\
	_r;								\
})

#define heap_add_or_replace(h, new, cmp, set_backpointer)		\
do {									\
	if (!heap_add(h, new, cmp, set_backpointer) &&			\
	    cmp(h, new, heap_peek(h)) >= 0) {				\
		(h)->data[0] = new;					\
		heap_set_backpointer(h, 0, set_backpointer);		\
		heap_sift_down(h, 0, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_del(h, i, cmp, set_backpointer)				\
do {									\
	size_t _i = (i);						\
									\
	BUG_ON(_i >= (h)->used);					\
	(h)->used--;							\
	if ((_i) < (h)->used) {						\
		heap_swap(h, _i, (h)->used, set_backpointer);		\
		heap_sift_up(h, _i, cmp, set_backpointer);		\
		heap_sift_down(h, _i, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_pop(h, d, cmp, set_backpointer)				\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		heap_del(h, 0, cmp, set_backpointer);			\
	}								\
	_r;								\
})

#define heap_resort(heap, cmp, set_backpointer)				\
do {									\
	ssize_t _i;							\
	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)	\
		heap_sift_down(heap, _i, cmp, set_backpointer);		\
} while (0)
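
/*
 * Illustrative sketch of the heap API above (the comparison macro name is
 * made up for the example): a min-heap of ints with no backpointers, where
 * the comparison callback has the form cmp(heap, l, r).
 *
 *	#define example_cmp(_h, _l, _r)	cmp_int(_l, _r)
 *
 *	DECLARE_HEAP(int, h);
 *	int v;
 *
 *	if (!init_heap(&h, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	heap_add(&h, 3, example_cmp, NULL);
 *	heap_add(&h, 1, example_cmp, NULL);
 *
 *	while (heap_pop(&h, v, example_cmp, NULL))
 *		pr_info("%i\n", v);	// pops 1, then 3
 *	free_heap(&h);
 */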

#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
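
/*
 * e.g. ANYSINT_MAX(s8) == 127 and ANYSINT_MAX(s64) == S64_MAX: the largest
 * value representable in a signed type of the given width.
 */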

#include "printbuf.h"

#define prt_vprintf(_out, ...)		bch2_prt_vprintf(_out, __VA_ARGS__)
#define prt_printf(_out, ...)		bch2_prt_printf(_out, __VA_ARGS__)
#define printbuf_str(_buf)		bch2_printbuf_str(_buf)
#define printbuf_exit(_buf)		bch2_printbuf_exit(_buf)

#define printbuf_tabstops_reset(_buf)	bch2_printbuf_tabstops_reset(_buf)
#define printbuf_tabstop_pop(_buf)	bch2_printbuf_tabstop_pop(_buf)
#define printbuf_tabstop_push(_buf, _n)	bch2_printbuf_tabstop_push(_buf, _n)

#define printbuf_indent_add(_out, _n)	bch2_printbuf_indent_add(_out, _n)
#define printbuf_indent_sub(_out, _n)	bch2_printbuf_indent_sub(_out, _n)

#define prt_newline(_out)		bch2_prt_newline(_out)
#define prt_tab(_out)			bch2_prt_tab(_out)
#define prt_tab_rjust(_out)		bch2_prt_tab_rjust(_out)

#define prt_bytes_indented(...)		bch2_prt_bytes_indented(__VA_ARGS__)
#define prt_u64(_out, _v)		prt_printf(_out, "%llu", (u64) (_v))
#define prt_human_readable_u64(...)	bch2_prt_human_readable_u64(__VA_ARGS__)
#define prt_human_readable_s64(...)	bch2_prt_human_readable_s64(__VA_ARGS__)
#define prt_units_u64(...)		bch2_prt_units_u64(__VA_ARGS__)
#define prt_units_s64(...)		bch2_prt_units_s64(__VA_ARGS__)
#define prt_string_option(...)		bch2_prt_string_option(__VA_ARGS__)
#define prt_bitflags(...)		bch2_prt_bitflags(__VA_ARGS__)
#define prt_bitflags_vector(...)	bch2_prt_bitflags_vector(__VA_ARGS__)

void bch2_pr_time_units(struct printbuf *, u64);
void bch2_prt_datetime(struct printbuf *, time64_t);
#ifdef __KERNEL__
static inline void uuid_unparse_lower(u8 *uuid, char *out)
{
	sprintf(out, "%pUb", uuid);
}
#else
#include <uuid/uuid.h>
#endif

static inline void pr_uuid(struct printbuf *out, u8 *uuid)
{
	char uuid_str[40];

	uuid_unparse_lower(uuid, uuid_str);
	prt_printf(out, "%s", uuid_str);
}

int bch2_strtoint_h(const char *, int *);
int bch2_strtouint_h(const char *, unsigned int *);
int bch2_strtoll_h(const char *, long long *);
int bch2_strtoull_h(const char *, unsigned long long *);
int bch2_strtou64_h(const char *, u64 *);

static inline int bch2_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtoint_h(cp, (int *) res);
#else
	return bch2_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch2_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtouint_h(cp, (unsigned int *) res);
#else
	return bch2_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res)						\
	( type_is(*res, int)		? bch2_strtoint_h(cp, (void *) res)\
	: type_is(*res, long)		? bch2_strtol_h(cp, (void *) res)\
	: type_is(*res, long long)	? bch2_strtoll_h(cp, (void *) res)\
	: type_is(*res, unsigned)	? bch2_strtouint_h(cp, (void *) res)\
	: type_is(*res, unsigned long)	? bch2_strtoul_h(cp, (void *) res)\
	: type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
	: -EINVAL)
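
/*
 * strtoi_h() dispatches on the type of *res at compile time, and the "_h"
 * parsers accept human-readable unit suffixes. Illustrative sketch (the
 * input string is made up):
 *
 *	u64 capacity;
 *	int ret = strtoi_h("10G", &capacity);	// accepts unit suffixes
 *	if (ret)
 *		return ret;
 */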

#define strtoul_safe(cp, var)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
	_r;								\
})

#define strtoul_safe_clamp(cp, var, min, max)				\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
	_r;								\
})

#define strtoul_safe_restrict(cp, var, min, max)			\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r && _v >= min && _v <= max)				\
		var = _v;						\
	else								\
		_r = -EINVAL;						\
	_r;								\
})
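
/*
 * These only assign to @var when parsing (and any range check) succeeds, so
 * a bad sysfs write never clobbers state. Illustrative sketch; the struct
 * and field below are hypothetical:
 *
 *	int ret = strtoul_safe_clamp(buf, opts->timeout_secs, 1, 3600);
 *	if (ret)
 *		return ret;
 */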

#define snprint(out, var)						\
	prt_printf(out,							\
		   type_is(var, int)		? "%i\n"		\
		 : type_is(var, unsigned)	? "%u\n"		\
		 : type_is(var, long)		? "%li\n"		\
		 : type_is(var, unsigned long)	? "%lu\n"		\
		 : type_is(var, s64)		? "%lli\n"		\
		 : type_is(var, u64)		? "%llu\n"		\
		 : type_is(var, char *)		? "%s\n"		\
		 : "%i\n", var)

bool bch2_is_zero(const void *, size_t);

u64 bch2_read_flag_list(char *, const char * const[]);

void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
void bch2_prt_u64_base2(struct printbuf *, u64);

void bch2_print_string_as_lines(const char *prefix, const char *lines);

typedef DARRAY(unsigned long) bch_stacktrace;
int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned, gfp_t);

static inline void prt_bdevname(struct printbuf *out, struct block_device *bdev)
{
#ifdef __KERNEL__
	prt_printf(out, "%pg", bdev);
#else
	prt_str(out, bdev->name);
#endif
}

void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);

#define ewma_add(ewma, val, weight)					\
({									\
	typeof(ewma) _ewma = (ewma);					\
	typeof(weight) _weight = (weight);				\
									\
	(((_ewma << _weight) - _ewma) + (val)) >> _weight;		\
})
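
/*
 * ewma_add() computes an exponentially weighted moving average in integer
 * arithmetic: the result moves 1/2^weight of the way from the old average
 * toward @val, rounding down. Worked example:
 *
 *	ewma = ewma_add(100, 200, 3);
 *	// ((100 << 3) - 100 + 200) >> 3 == 900 >> 3 == 112
 */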

struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	u64			next;

	/*
	 * Rate at which we want to do work, in units per nanosecond
	 * The units here correspond to the units passed to
	 * bch2_ratelimit_increment()
	 */
	unsigned		rate;
};

static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

u64 bch2_ratelimit_delay(struct bch_ratelimit *);
void bch2_ratelimit_increment(struct bch_ratelimit *, u64);
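
/*
 * Illustrative sketch of the intended usage pattern (the work function and
 * its units are hypothetical): account work done with
 * bch2_ratelimit_increment(), then back off by whatever delay
 * bch2_ratelimit_delay() reports before doing more.
 *
 *	struct bch_ratelimit d;
 *
 *	bch2_ratelimit_reset(&d);
 *	while (have_work()) {
 *		u64 done = do_some_work();
 *		u64 delay;
 *
 *		bch2_ratelimit_increment(&d, done);
 *		delay = bch2_ratelimit_delay(&d);
 *		if (delay)
 *			schedule_timeout_interruptible(nsecs_to_jiffies(delay));
 *	}
 */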

struct bch_pd_controller {
	struct bch_ratelimit	rate;
	unsigned long		last_update;

	s64			last_actual;
	s64			smoothed_derivative;

	unsigned		p_term_inverse;
	unsigned		d_smooth;
	unsigned		d_term;

	/* for exporting to sysfs (no effect on behavior) */
	s64			last_derivative;
	s64			last_proportional;
	s64			last_change;
	s64			last_target;

	/*
	 * If true, the rate will not increase if bch2_ratelimit_delay()
	 * is not being called often enough.
	 */
	bool			backpressure;
};

void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
void bch2_pd_controller_init(struct bch_pd_controller *);
void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);

#define sysfs_pd_controller_attribute(name)				\
	rw_attribute(name##_rate);					\
	rw_attribute(name##_rate_bytes);				\
	rw_attribute(name##_rate_d_term);				\
	rw_attribute(name##_rate_p_term_inverse);			\
	read_attribute(name##_rate_debug)

#define sysfs_pd_controller_files(name)					\
	&sysfs_##name##_rate,						\
	&sysfs_##name##_rate_bytes,					\
	&sysfs_##name##_rate_d_term,					\
	&sysfs_##name##_rate_p_term_inverse,				\
	&sysfs_##name##_rate_debug

#define sysfs_pd_controller_show(name, var)				\
do {									\
	sysfs_hprint(name##_rate,	(var)->rate.rate);		\
	sysfs_print(name##_rate_bytes,	(var)->rate.rate);		\
	sysfs_print(name##_rate_d_term,	(var)->d_term);			\
	sysfs_print(name##_rate_p_term_inverse,	(var)->p_term_inverse);	\
									\
	if (attr == &sysfs_##name##_rate_debug)				\
		bch2_pd_controller_debug_to_text(out, var);		\
} while (0)

#define sysfs_pd_controller_store(name, var)				\
do {									\
	sysfs_strtoul_clamp(name##_rate,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul_clamp(name##_rate_bytes,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul(name##_rate_d_term,	(var)->d_term);		\
	sysfs_strtoul_clamp(name##_rate_p_term_inverse,			\
			    (var)->p_term_inverse, 1, INT_MAX);		\
} while (0)

#define container_of_or_null(ptr, type, member)				\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x   = 1 << x;
	x  += (x * fract) >> fract_bits;

	return x;
}
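
/*
 * Worked example: with fract_bits = 3, the low 3 bits of x are the fraction
 * and the rest is the exponent, so fract_exp_two(12, 3) treats 12 as
 * exponent 1, fraction 4/8:
 *
 *	fract = 12 & 7 = 4;	x = 12 >> 3 = 1;
 *	x = 1 << 1 = 2;		x += (2 * 4) >> 3 = 1;	// result 3
 */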

void bch2_bio_map(struct bio *bio, void *base, size_t);
int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl)					\
do {									\
	closure_get(cl);						\
	submit_bio(bio);						\
} while (0)

#define kthread_wait(cond)						\
({									\
	int _ret = 0;							\
									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

#define kthread_wait_freezable(cond)					\
({									\
	int _ret = 0;							\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
		try_to_freeze();					\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})
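
/*
 * Illustrative sketch of how a kthread main loop would use these (the
 * predicate and work function are hypothetical): both macros return nonzero
 * once kthread_should_stop() is set, which is the signal to exit.
 *
 *	static int example_thread_fn(void *arg)
 *	{
 *		while (!kthread_wait_freezable(have_work(arg)))
 *			do_pending_work(arg);
 *		return 0;
 *	}
 */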

size_t bch2_rand_range(size_t);

void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);

static inline void memcpy_u64s_small(void *dst, const void *src,
				     unsigned u64s)
{
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
}

static inline void __memcpy_u64s(void *dst, const void *src,
				 unsigned u64s)
{
#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("rep ; movsq"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
#endif
}

static inline void memcpy_u64s(void *dst, const void *src,
			       unsigned u64s)
{
	EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
		  dst + u64s * sizeof(u64) <= src));

	__memcpy_u64s(dst, src, u64s);
}

static inline void __memmove_u64s_down(void *dst, const void *src,
				       unsigned u64s)
{
	__memcpy_u64s(dst, src, u64s);
}

static inline void memmove_u64s_down(void *dst, const void *src,
				     unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down(dst, src, u64s);
}

static inline void __memmove_u64s_down_small(void *dst, const void *src,
					     unsigned u64s)
{
	memcpy_u64s_small(dst, src, u64s);
}

static inline void memmove_u64s_down_small(void *dst, const void *src,
					    unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down_small(dst, src, u64s);
}

static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
					   unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s;
	u64 *src = (u64 *) _src + u64s;

	while (u64s--)
		*--dst = *--src;
}

static inline void memmove_u64s_up_small(void *dst, const void *src,
					 unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up_small(dst, src, u64s);
}

static inline void __memmove_u64s_up(void *_dst, const void *_src,
				     unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s - 1;
	u64 *src = (u64 *) _src + u64s - 1;

#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("std ;\n"
		     "rep ; movsq\n"
		     "cld ;\n"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	while (u64s--)
		*dst-- = *src--;
#endif
}

static inline void memmove_u64s_up(void *dst, const void *src,
				   unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up(dst, src, u64s);
}

static inline void memmove_u64s(void *dst, const void *src,
				unsigned u64s)
{
	if (dst < src)
		__memmove_u64s_down(dst, src, u64s);
	else
		__memmove_u64s_up(dst, src, u64s);
}
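
/*
 * Like memmove(), but sized in u64s: the copy direction is chosen so that
 * overlapping ranges are handled correctly. Illustrative sketch:
 *
 *	u64 buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
 *
 *	memmove_u64s(&buf[2], &buf[0], 6);	// buf is now 0 1 0 1 2 3 4 5
 */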

/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
{
	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;

	memset(s + bytes, c, rem);
}

/* just the memmove, doesn't update @_nr */
#define __array_insert_item(_array, _nr, _pos)				\
	memmove(&(_array)[(_pos) + 1],					\
		&(_array)[(_pos)],					\
		sizeof((_array)[0]) * ((_nr) - (_pos)))

#define array_insert_item(_array, _nr, _pos, _new_item)			\
do {									\
	__array_insert_item(_array, _nr, _pos);				\
	(_nr)++;							\
	(_array)[(_pos)] = (_new_item);					\
} while (0)

#define array_remove_items(_array, _nr, _pos, _nr_to_remove)		\
do {									\
	(_nr) -= (_nr_to_remove);					\
	memmove(&(_array)[(_pos)],					\
		&(_array)[(_pos) + (_nr_to_remove)],			\
		sizeof((_array)[0]) * ((_nr) - (_pos)));		\
} while (0)

#define array_remove_item(_array, _nr, _pos)				\
	array_remove_items(_array, _nr, _pos, 1)

static inline void __move_gap(void *array, size_t element_size,
			      size_t nr, size_t size,
			      size_t old_gap, size_t new_gap)
{
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(array + element_size * (gap_end - move),
			array + element_size * (old_gap - move),
			element_size * move);
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(array + element_size * old_gap,
			array + element_size * gap_end,
			element_size * move);
	}
}

/* Move the gap in a gap buffer: */
#define move_gap(_d, _new_gap)						\
do {									\
	BUG_ON(_new_gap > (_d)->nr);					\
	BUG_ON((_d)->gap > (_d)->nr);					\
									\
	__move_gap((_d)->data, sizeof((_d)->data[0]),			\
		   (_d)->nr, (_d)->size, (_d)->gap, _new_gap);		\
	(_d)->gap = _new_gap;						\
} while (0)
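
/*
 * A gap buffer stores nr elements in a backing array of size slots, with the
 * unused slots ("the gap") kept at index gap so that insertions there are
 * cheap. Illustrative layout, with size = 8, nr = 5, gap = 2:
 *
 *	index:    0 1 2 3 4 5 6 7
 *	contents: A B . . . C D E	// '.' marks the unused gap slots
 *
 * move_gap(d, 5) shifts C, D, E left so the gap ends up at indices 5..7.
 */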

#define bubble_sort(_base, _nr, _cmp)					\
do {									\
	ssize_t _i, _last;						\
	bool _swapped = true;						\
									\
	for (_last = (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
		_swapped = false;					\
		for (_i = 0; _i < _last; _i++)				\
			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) {	\
				swap((_base)[_i], (_base)[_i + 1]);	\
				_swapped = true;			\
			}						\
	}								\
} while (0)
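
/*
 * Illustrative sketch: sorts in place, ascending for a cmp_int()-style
 * comparison; only sensible for small arrays.
 *
 *	unsigned nums[4] = { 3, 1, 4, 1 };
 *
 *	bubble_sort(nums, 4, cmp_int);	// nums is now 1 1 3 4
 */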

static inline u64 percpu_u64_get(u64 __percpu *src)
{
	u64 ret = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(src, cpu);
	return ret;
}

static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(dst, cpu) = 0;
	this_cpu_write(*dst, src);
}

static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++)
		acc[i] += src[i];
}

static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
				   unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
}

static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memset(per_cpu_ptr(p, cpu), c, bytes);
}

u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);
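
/*
 * Illustrative sketch of the percpu helpers above: a counter is bumped
 * locklessly on the local CPU and read back as the sum over all CPUs. The
 * variable name is made up for the example.
 *
 *	u64 __percpu *nr_ops = alloc_percpu(u64);
 *
 *	this_cpu_inc(*nr_ops);				// hot path, no locking
 *	pr_info("%llu\n", percpu_u64_get(nr_ops));	// slow path: sum all CPUs
 *	free_percpu(nr_ops);
 */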

#define cmp_int(l, r)		((l > r) - (l < r))

static inline int u8_cmp(u8 l, u8 r)
{
	return cmp_int(l, r);
}

static inline int cmp_le32(__le32 l, __le32 r)
{
	return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
}

#include <linux/uuid.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

static inline bool qstr_eq(const struct qstr l, const struct qstr r)
{
	return l.len == r.len && !memcmp(l.name, r.name, l.len);
}

void bch2_darray_str_exit(darray_str *);
int bch2_split_devs(const char *, darray_str *);

#ifdef __KERNEL__

__must_check
static inline int copy_to_user_errcode(void __user *to, const void *from, unsigned long n)
{
	return copy_to_user(to, from, n) ? -EFAULT : 0;
}

__must_check
static inline int copy_from_user_errcode(void *to, const void __user *from, unsigned long n)
{
	return copy_from_user(to, from, n) ? -EFAULT : 0;
}

#endif

static inline void mod_bit(long nr, volatile unsigned long *addr, bool v)
{
	if (v)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

static inline void __set_bit_le64(size_t bit, __le64 *addr)
{
	addr[bit / 64] |= cpu_to_le64(BIT_ULL(bit % 64));
}

static inline void __clear_bit_le64(size_t bit, __le64 *addr)
{
	addr[bit / 64] &= ~cpu_to_le64(BIT_ULL(bit % 64));
}

static inline bool test_bit_le64(size_t bit, __le64 *addr)
{
	return (addr[bit / 64] & cpu_to_le64(BIT_ULL(bit % 64))) != 0;
}
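
/*
 * These operate on little-endian 64-bit words, so the layout of a bitmap is
 * the same on big- and little-endian hosts. Illustrative sketch:
 *
 *	__le64 bitmap[2] = {};
 *
 *	__set_bit_le64(70, bitmap);		// sets bit 6 of bitmap[1]
 *	if (test_bit_le64(70, bitmap))
 *		__clear_bit_le64(70, bitmap);
 */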

#endif /* _BCACHEFS_UTIL_H */