/*
 * Copyright (c) 2007, 2020 Oracle and/or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
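
/*
 * Worked example for rds_pages_in_vec(), assuming 4K pages: addr = 0x1ff8
 * and bytes = 16 straddle a page boundary, so the result is
 * ((0x2008 + 0xfff) >> 12) - (0x1ff8 >> 12) = 3 - 1 = 2 pages, even though
 * only 16 bytes are described.
 */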

/*
 * Look up the MR with the given R_Key in the socket's rbtree.  If @insert
 * is non-NULL and the key is not yet present, link it into the tree and
 * take a reference on it.  Returns the existing MR when the key is already
 * in use, NULL otherwise.
 */
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		kref_get(&insert->r_kref);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of a MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, kref_read(&mr->r_kref));

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct kref *kref)
{
	struct rds_mr *mr = container_of(kref, struct rds_mr, r_kref);

	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		kref_put(&mr->r_kref, __rds_put_mr_final);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.  Returns the number of pages pinned
 * on success.  If only part of the range could be pinned, the pinned pages
 * are released again and -EFAULT is returned.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	unsigned int gup_flags = FOLL_LONGTERM;
	int ret;

	if (write)
		gup_flags |= FOLL_WRITE;

	ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
	if (ret >= 0 && ret < nr_pages) {
		unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}

	return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	struct scatterlist *sg = NULL;
	unsigned int nr_pages;
	struct page **pages = NULL;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents = 0;
	int need_odp = 0;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
	    PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
	    (args->vec.addr + args->vec.bytes)) {
		ret = -EINVAL;
		goto out;
	}

	if (!can_do_mlock()) {
		ret = -EPERM;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of mr irrespective of underlying transport
	 * To account for unaligned mr regions, subtract one from nr_pages
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		 args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	kref_init(&mr->r_kref);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret == -EOPNOTSUPP) {
		need_odp = 1;
	} else if (ret <= 0) {
		goto out;
	} else {
		nents = ret;
		sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
		if (!sg) {
			ret = -ENOMEM;
			goto out;
		}
		WARN_ON(!nents);
		sg_init_table(sg, nents);

		/* Stick all pages into the scatterlist */
		for (i = 0 ; i < nents; i++)
			sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

		rdsdebug("RDS: trans_private nents is %u\n", nents);
	}
	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(
		sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
		args->vec.addr, args->vec.bytes,
		need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);

	if (IS_ERR(trans_private)) {
		/* In ODP case, we don't GUP pages, so don't need
		 * to release anything.
		 */
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
		 mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
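	/*
	 * Presumed cookie layout (see rds_rdma_make_cookie() in rds.h): the
	 * R_Key occupies the low 32 bits and the page offset the high 32
	 * bits, which is what rds_rdma_cookie_key() and
	 * rds_rdma_cookie_offset() pull back apart on the send path.
	 */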
	if (need_odp)
		cookie = rds_rdma_make_cookie(mr->r_key, 0);
	else
		cookie = rds_rdma_make_cookie(mr->r_key,
					      args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr &&
	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		kref_get(&mr->r_kref);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		kref_put(&mr->r_kref, __rds_put_mr_final);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}
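
/*
 * Illustrative (untested) userspace sketch of the RDS_GET_MR path above,
 * assuming the uapi definitions from <linux/rds.h>:
 *
 *	struct rds_get_mr_args args = {
 *		.vec		= { .addr = (uint64_t)buf, .bytes = len },
 *		.cookie_addr	= (uint64_t)&cookie,
 *		.flags		= RDS_RDMA_USE_ONCE,
 *	};
 *	ret = setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
 *
 * On success, the 64-bit cookie written to cookie_addr is what the peer
 * hands back when it asks for an RDMA transfer against this MR.
 */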

int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval,
			      sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	kref_put(&mr->r_kref, __rds_put_mr_final);
	return 0;
}
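
/*
 * Userspace counterpart (illustrative only, assuming <linux/rds.h>): freeing
 * a registration is setsockopt(fd, SOL_RDS, RDS_FREE_MR, &args, sizeof(args))
 * with args.cookie set to the cookie returned by RDS_GET_MR, or 0 to flush
 * all unused MRs; RDS_RDMA_INVALIDATE in args.flags requests invalidation.
 */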

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	/* Get a reference so that the MR won't go away before calling
	 * sync_mr() below.
	 */
	kref_get(&mr->r_kref);

	/* If it is going to be freed, remove it from the tree now so
	 * that no other thread can find it and free it.
	 */
	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* Release the reference held above. */
	kref_put(&mr->r_kref, __rds_put_mr_final);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me)
		kref_put(&mr->r_kref, __rds_put_mr_final);
}

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	if (ro->op_odp_mr) {
		kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
	} else {
		for (i = 0; i < ro->op_nents; i++) {
			struct page *page = sg_page(&ro->op_sg[i]);

			/* Mark page dirty if it was possibly modified, which
			 * is the case for a RDMA_READ which copies from remote
			 * to local memory
			 */
			unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
		}
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
	ro->op_odp_mr = NULL;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for a RDMA_READ which copies from remote
	 * to local memory */
	unpin_user_pages_dirty_lock(&page, 1, true);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}


/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	if (args->nr_local > UIO_MAXIOV)
		return -EMSGSIZE;

	iov->iov = kcalloc(args->nr_local,
			   sizeof(struct rds_iovec),
			   GFP_KERNEL);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, local_vec, args->nr_local *
			   sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++, vec++) {

		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}

/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;
	bool odp_supported = true;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}
	/* odp-mr is not supported for multiple requests within one message */
	if (args->nr_local != 1)
		odp_supported = false;

	iovs = vec->iov;

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	op->op_odp_mr = NULL;

	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (IS_ERR(op->op_sg)) {
		ret = PTR_ERR(op->op_sg);
		goto out_pages;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
		 (unsigned long long)args->nr_local,
		 (unsigned long long)args->remote_vec.addr,
		 op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if ((!odp_supported && ret <= 0) ||
		    (odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
			goto out_pages;

		if (ret == -EOPNOTSUPP) {
			struct rds_mr *local_odp_mr;

			if (!rs->rs_transport->get_mr) {
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			local_odp_mr =
				kzalloc(sizeof(*local_odp_mr), GFP_KERNEL);
			if (!local_odp_mr) {
				ret = -ENOMEM;
				goto out_pages;
			}
			RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
			kref_init(&local_odp_mr->r_kref);
			local_odp_mr->r_trans = rs->rs_transport;
			local_odp_mr->r_sock = rs;
			local_odp_mr->r_trans_private =
				rs->rs_transport->get_mr(
					NULL, 0, rs, &local_odp_mr->r_key, NULL,
					iov->addr, iov->bytes, ODP_VIRTUAL);
			if (IS_ERR(local_odp_mr->r_trans_private)) {
				ret = PTR_ERR(local_odp_mr->r_trans_private);
				rdsdebug("get_mr ret %d %p\n", ret,
					 local_odp_mr->r_trans_private);
				kfree(local_odp_mr);
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
				 local_odp_mr, local_odp_mr->r_trans_private);
			op->op_odp_mr = local_odp_mr;
			op->op_odp_addr = iov->addr;
		}

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
				    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
				    offset);

			sg_dma_len(sg) = sg->length;
			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
				 sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
			 nr_bytes,
			 (unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out_pages;
	}
	op->op_bytes = nr_bytes;
	ret = 0;

out_pages:
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
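
/*
 * Rough (untested) sketch of the control message a sender would attach for
 * the path above, assuming the uapi definitions from <linux/rds.h>:
 *
 *	struct rds_iovec iov = { .addr = (uint64_t)buf, .bytes = len };
 *	struct rds_rdma_args args = {
 *		.cookie		= remote_cookie,
 *		.remote_vec	= { .addr = remote_addr, .bytes = len },
 *		.local_vec_addr	= (uint64_t)&iov,
 *		.nr_local	= 1,
 *		.flags		= RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
 *		.user_token	= token,
 *	};
 *
 * where remote_cookie is the cookie the peer obtained via RDS_GET_MR, and
 * the whole struct is carried in a cmsghdr with cmsg_level SOL_RDS and
 * cmsg_type RDS_CMSG_RDMA_ARGS on the sendmsg() call.
 */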

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		kref_get(&mr->r_kref);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private,
				     DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
}

/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (IS_ERR(rm->atomic.op_sg)) {
		ret = PTR_ERR(rm->atomic.op_sg);
		goto err;
	}

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		unpin_user_page(page);
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}
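/*
 * Note on the atomic local buffer above: the single page is pinned at
 * args->local_addr with FOLL_WRITE and mapped as an 8-byte scatterlist
 * entry because, presumably, the transport deposits the operation's return
 * value (the prior remote contents for fetch-add and compare-swap) into
 * that location on completion.
 */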