// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

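/*
 * A slot_map hands out indices into a bitmap of buffer descriptors.
 * ->c is the number of free slots, or a negative value during teardown
 * (-1 means "no map installed").  ->count is the total number of slots
 * and ->map the usage bitmap; the wait queue's internal spinlock
 * (->q.lock) protects all of the fields.
 */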
struct slot_map {
	int c;
	wait_queue_head_t q;
	int count;
	unsigned long *map;
};

static struct slot_map rw_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
};
static struct slot_map readdir_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
};

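/*
 * install() publishes a freshly allocated usage bitmap: it sets both
 * the free count and the total count to @count, points ->map at @map,
 * and wakes every waiter that was blocked waiting for a map to appear.
 */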
static void install(struct slot_map *m, int count, unsigned long *map)
{
	spin_lock(&m->q.lock);
	m->c = m->count = count;
	m->map = map;
	wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

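/*
 * mark_killed() starts teardown: subtracting ->count + 1 drives ->c
 * negative, so get() callers wait instead of taking new slots.  Once
 * every outstanding slot has been put() back, ->c climbs back to -1,
 * the "no map installed" state.
 */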
static void mark_killed(struct slot_map *m)
{
	spin_lock(&m->q.lock);
	m->c -= m->count + 1;
	spin_unlock(&m->q.lock);
}

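/*
 * run_down() sleeps (uninterruptibly) until ->c reaches -1, i.e. until
 * every slot handed out before mark_killed() has been returned, then
 * detaches the bitmap.
 */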
static void run_down(struct slot_map *m)
{
	DEFINE_WAIT(wait);
	spin_lock(&m->q.lock);
	if (m->c != -1) {
		for (;;) {
			if (likely(list_empty(&wait.entry)))
				__add_wait_queue_entry_tail(&m->q, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);

			if (m->c == -1)
				break;

			spin_unlock(&m->q.lock);
			schedule();
			spin_lock(&m->q.lock);
		}
		__remove_wait_queue(&m->q, &wait);
		__set_current_state(TASK_RUNNING);
	}
	m->map = NULL;
	spin_unlock(&m->q.lock);
}

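/*
 * put() releases a slot: it clears the slot's bit, bumps the free
 * count, wakes one exclusive waiter when a slot is genuinely free
 * (->c > 0), and wakes everyone when teardown completes (->c == -1).
 */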
static void put(struct slot_map *m, int slot)
{
	int v;
	spin_lock(&m->q.lock);
	__clear_bit(slot, m->map);
	v = ++m->c;
	if (v > 0)
		wake_up_locked(&m->q);
	if (unlikely(v == -1))	/* finished dying */
		wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

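/*
 * wait_for_free() sleeps (interruptibly, as an exclusive waiter) until
 * a slot is free, the overall slot_timeout_secs budget runs out, or a
 * signal arrives.  While no map is installed (->c < 0) each individual
 * sleep is capped at ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS; "left" tracks
 * how much of the total budget remains across wakeups.
 */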
static int wait_for_free(struct slot_map *m)
{
	long left = slot_timeout_secs * HZ;
	DEFINE_WAIT(wait);

	do {
		long n = left, t;
		if (likely(list_empty(&wait.entry)))
			__add_wait_queue_entry_tail_exclusive(&m->q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		if (m->c > 0)
			break;

		if (m->c < 0) {
			/* we are waiting for map to be installed */
			/* it had better be there soon, or we go away */
			if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
				n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
		}
		spin_unlock(&m->q.lock);
		t = schedule_timeout(n);
		spin_lock(&m->q.lock);
		if (unlikely(!t) && n != left && m->c < 0)
			left = t;
		else
			left = t + (left - n);
		if (signal_pending(current))
			left = -EINTR;
	} while (left > 0);

	if (!list_empty(&wait.entry))
		list_del(&wait.entry);
	else if (left <= 0 && waitqueue_active(&m->q))
		__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
	__set_current_state(TASK_RUNNING);

	if (likely(left > 0))
		return 0;

	return left < 0 ? -EINTR : -ETIMEDOUT;
}

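/*
 * get() claims a free slot: wait for one if necessary, decrement the
 * free count, and return the index of the first clear bit (now set).
 */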
static int get(struct slot_map *m)
{
	int res = 0;
	spin_lock(&m->q.lock);
	if (unlikely(m->c <= 0))
		res = wait_for_free(m);
	if (likely(!res)) {
		m->c--;
		res = find_first_zero_bit(m->map, m->count);
		__set_bit(res, m->map);
	}
	spin_unlock(&m->q.lock);
	return res;
}

/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
	void __user *uaddr;		/* user space address pointer */
	struct page **page_array;	/* array of mapped pages */
	int array_count;		/* size of above arrays */
	struct list_head list_link;
};

static struct orangefs_bufmap {
	int desc_size;
	int desc_shift;
	int desc_count;
	int total_size;
	int page_count;

	struct page **page_array;
	struct orangefs_bufmap_desc *desc_array;

	/* array to track usage of buffer descriptors */
	unsigned long *buffer_index_array;

	/* array to track usage of buffer descriptors for readdir */
#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG)
	unsigned long readdir_index_array[N];
#undef N
} *__orangefs_bufmap;

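/* protects __orangefs_bufmap */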
static DEFINE_SPINLOCK(orangefs_bufmap_lock);

static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
	unpin_user_pages(bufmap->page_array, bufmap->page_count);
}

static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
	kfree(bufmap->page_array);
	kfree(bufmap->desc_array);
	bitmap_free(bufmap->buffer_index_array);
	kfree(bufmap);
}

/*
 * XXX: Can the size and shift change while the caller gives up the
 * XXX: lock between calling this and doing something useful?
 */

int orangefs_bufmap_size_query(void)
{
	struct orangefs_bufmap *bufmap;
	int size = 0;
	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		size = bufmap->desc_size;
	spin_unlock(&orangefs_bufmap_lock);
	return size;
}

int orangefs_bufmap_shift_query(void)
{
	struct orangefs_bufmap *bufmap;
	int shift = 0;
	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		shift = bufmap->desc_shift;
	spin_unlock(&orangefs_bufmap_lock);
	return shift;
}

static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);

static struct orangefs_bufmap *
orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;

	bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
	if (!bufmap)
		goto out;

	bufmap->total_size = user_desc->total_size;
	bufmap->desc_count = user_desc->count;
	bufmap->desc_size = user_desc->size;
	bufmap->desc_shift = ilog2(bufmap->desc_size);

	bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
	if (!bufmap->buffer_index_array)
		goto out_free_bufmap;

	bufmap->desc_array =
		kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
			GFP_KERNEL);
	if (!bufmap->desc_array)
		goto out_free_index_array;

	bufmap->page_count = bufmap->total_size / PAGE_SIZE;

	/* allocate storage to track our page mappings */
	bufmap->page_array =
		kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
	if (!bufmap->page_array)
		goto out_free_desc_array;

	return bufmap;

out_free_desc_array:
	kfree(bufmap->desc_array);
out_free_index_array:
	bitmap_free(bufmap->buffer_index_array);
out_free_bufmap:
	kfree(bufmap);
out:
	return NULL;
}

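/*
 * orangefs_bufmap_map() pins the user-space buffer in place with
 * pin_user_pages_fast() and carves it into desc_count descriptors of
 * pages_per_desc pages each.  If fewer pages than requested could be
 * pinned, everything is unpinned again and -ENOMEM is returned.
 */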
static int
orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
		struct ORANGEFS_dev_map_desc *user_desc)
{
	int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
	int offset = 0, ret, i;

	/* map the pages */
	ret = pin_user_pages_fast((unsigned long)user_desc->ptr,
			     bufmap->page_count, FOLL_WRITE,
			     bufmap->page_array);

	if (ret < 0)
		return ret;

	if (ret != bufmap->page_count) {
		gossip_err("orangefs error: asked for %d pages, only got %d.\n",
				bufmap->page_count, ret);

		for (i = 0; i < ret; i++) {
			SetPageError(bufmap->page_array[i]);
			unpin_user_page(bufmap->page_array[i]);
		}
		return -ENOMEM;
	}

	/*
	 * ideally we want to get kernel space pointers for each page, but
	 * we can't kmap that many pages at once if highmem is being used.
	 * so instead, we just kmap/kunmap the page address each time the
	 * kaddr is needed.
	 */
	for (i = 0; i < bufmap->page_count; i++)
		flush_dcache_page(bufmap->page_array[i]);

	/* build a list of available descriptors */
	for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
		bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
		bufmap->desc_array[i].array_count = pages_per_desc;
		bufmap->desc_array[i].uaddr =
		    (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
		offset += pages_per_desc;
	}

	return 0;
}

/*
 * orangefs_bufmap_initialize()
 *
 * initializes the mapped buffer interface
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: called (ptr ("
		     "%p) sz (%d) cnt(%d).\n",
		     user_desc->ptr,
		     user_desc->size,
		     user_desc->count);

	if (user_desc->total_size < 0 ||
	    user_desc->size < 0 ||
	    user_desc->count < 0)
		goto out;

	/*
	 * sanity check alignment and size of buffer that caller wants to
	 * work with
	 */
	if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
	    (unsigned long)user_desc->ptr) {
		gossip_err("orangefs error: memory alignment (front). %p\n",
			   user_desc->ptr);
		goto out;
	}

	if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
	    != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
		gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
			   user_desc->ptr,
			   user_desc->total_size);
		goto out;
	}

	if (user_desc->total_size != (user_desc->size * user_desc->count)) {
		gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
			   user_desc->total_size,
			   user_desc->size,
			   user_desc->count);
		goto out;
	}

	if ((user_desc->size % PAGE_SIZE) != 0) {
		gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
			   user_desc->size);
		goto out;
	}

	ret = -ENOMEM;
	bufmap = orangefs_bufmap_alloc(user_desc);
	if (!bufmap)
		goto out;

	ret = orangefs_bufmap_map(bufmap, user_desc);
	if (ret)
		goto out_free_bufmap;

	spin_lock(&orangefs_bufmap_lock);
	if (__orangefs_bufmap) {
		spin_unlock(&orangefs_bufmap_lock);
		gossip_err("orangefs: error: bufmap already initialized.\n");
		ret = -EINVAL;
		goto out_unmap_bufmap;
	}
	__orangefs_bufmap = bufmap;
	install(&rw_map,
		bufmap->desc_count,
		bufmap->buffer_index_array);
	install(&readdir_map,
		ORANGEFS_READDIR_DEFAULT_DESC_COUNT,
		bufmap->readdir_index_array);
	spin_unlock(&orangefs_bufmap_lock);

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: exiting normally\n");
	return 0;

out_unmap_bufmap:
	orangefs_bufmap_unmap(bufmap);
out_free_bufmap:
	orangefs_bufmap_free(bufmap);
out:
	return ret;
}

/*
 * orangefs_bufmap_finalize()
 *
 * begins shutdown of the mapped buffer interface by marking both slot
 * maps killed; the resources themselves are released later, in
 * orangefs_bufmap_run_down()
 *
 * no return value
 */
void orangefs_bufmap_finalize(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;
	if (!bufmap)
		return;
	gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
	mark_killed(&rw_map);
	mark_killed(&readdir_map);
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_finalize: exiting normally\n");
}

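/*
 * orangefs_bufmap_run_down() completes the teardown started by
 * orangefs_bufmap_finalize(): it waits for all outstanding slots to be
 * returned, clears the global bufmap pointer, then unpins the user
 * pages and frees the bookkeeping structures.
 */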
void orangefs_bufmap_run_down(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;
	if (!bufmap)
		return;
	run_down(&rw_map);
	run_down(&readdir_map);
	spin_lock(&orangefs_bufmap_lock);
	__orangefs_bufmap = NULL;
	spin_unlock(&orangefs_bufmap_lock);
	orangefs_bufmap_unmap(bufmap);
	orangefs_bufmap_free(bufmap);
}

/*
 * orangefs_bufmap_get()
 *
 * gets a free mapped buffer descriptor, will sleep until one becomes
 * available if necessary
 *
 * returns slot on success, -errno on failure
 */
int orangefs_bufmap_get(void)
{
	return get(&rw_map);
}

/*
 * orangefs_bufmap_put()
 *
 * returns a mapped buffer descriptor to the collection
 *
 * no return value
 */
void orangefs_bufmap_put(int buffer_index)
{
	put(&rw_map, buffer_index);
}
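
/*
 * Illustrative usage (a sketch, not code from this file): a typical
 * I/O path claims a slot, stages data through it, and releases it.
 *
 *	int slot = orangefs_bufmap_get();
 *	if (slot < 0)
 *		return slot;
 *	ret = orangefs_bufmap_copy_from_iovec(iter, slot, size);
 *	...hand the slot to the client-core and wait for completion...
 *	orangefs_bufmap_put(slot);
 */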

/*
 * orangefs_readdir_index_get()
 *
 * gets a free descriptor, will sleep until one becomes
 * available if necessary.
 * Although the readdir buffers are not mapped into kernel space
 * we could do that at a later point in time. Regardless, these
 * indices are used by the client-core.
 *
 * returns slot on success, -errno on failure
 */
int orangefs_readdir_index_get(void)
{
	return get(&readdir_map);
}

void orangefs_readdir_index_put(int buffer_index)
{
	put(&readdir_map, buffer_index);
}

/*
 * we've been handed an iovec, we need to copy it to
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
				    int buffer_index,
				    size_t size)
{
	struct orangefs_bufmap_desc *to;
	int i;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	to = &__orangefs_bufmap->desc_array[buffer_index];
	for (i = 0; size; i++) {
		struct page *page = to->page_array[i];
		size_t n = size;
		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		if (copy_page_from_iter(page, 0, n, iter) != n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}

/*
 * we've been handed an iovec, we need to fill it from
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
				  int buffer_index,
				  size_t size)
{
	struct orangefs_bufmap_desc *from;
	int i;

	from = &__orangefs_bufmap->desc_array[buffer_index];
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	for (i = 0; size; i++) {
		struct page *page = from->page_array[i];
		size_t n = size;
		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		n = copy_page_to_iter(page, 0, n, iter);
		if (!n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}

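/*
 * orangefs_bufmap_page_fill() copies a single page out of the shared
 * buffer (descriptor @buffer_index, page @slot_index) into the kernel
 * page at @page_to.
 */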
void orangefs_bufmap_page_fill(void *page_to,
			       int buffer_index,
			       int slot_index)
{
	struct orangefs_bufmap_desc *from;
	void *page_from;

	from = &__orangefs_bufmap->desc_array[buffer_index];
	page_from = kmap_atomic(from->page_array[slot_index]);
	memcpy(page_to, page_from, PAGE_SIZE);
	kunmap_atomic(page_from);
}