1 | // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB |
2 | /* |
3 | * Copyright (c) 2005 Voltaire Inc. All rights reserved. |
4 | * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. |
5 | * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved. |
6 | * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. |
7 | */ |
8 | |
9 | #include <linux/completion.h> |
10 | #include <linux/in.h> |
11 | #include <linux/in6.h> |
12 | #include <linux/mutex.h> |
13 | #include <linux/random.h> |
14 | #include <linux/rbtree.h> |
15 | #include <linux/igmp.h> |
16 | #include <linux/xarray.h> |
17 | #include <linux/inetdevice.h> |
18 | #include <linux/slab.h> |
19 | #include <linux/module.h> |
20 | #include <net/route.h> |
21 | |
22 | #include <net/net_namespace.h> |
23 | #include <net/netns/generic.h> |
24 | #include <net/netevent.h> |
25 | #include <net/tcp.h> |
26 | #include <net/ipv6.h> |
27 | #include <net/ip_fib.h> |
28 | #include <net/ip6_route.h> |
29 | |
30 | #include <rdma/rdma_cm.h> |
31 | #include <rdma/rdma_cm_ib.h> |
32 | #include <rdma/rdma_netlink.h> |
33 | #include <rdma/ib.h> |
34 | #include <rdma/ib_cache.h> |
35 | #include <rdma/ib_cm.h> |
36 | #include <rdma/ib_sa.h> |
37 | #include <rdma/iw_cm.h> |
38 | |
39 | #include "core_priv.h" |
40 | #include "cma_priv.h" |
41 | #include "cma_trace.h" |
42 | |
43 | MODULE_AUTHOR("Sean Hefty" ); |
44 | MODULE_DESCRIPTION("Generic RDMA CM Agent" ); |
45 | MODULE_LICENSE("Dual BSD/GPL" ); |
46 | |
47 | #define CMA_CM_RESPONSE_TIMEOUT 20 |
48 | #define CMA_MAX_CM_RETRIES 15 |
49 | #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) |
50 | #define CMA_IBOE_PACKET_LIFETIME 16 |
51 | #define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP |
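
/*
 * These follow the IB CM timeout encoding, where a value t stands for
 * roughly 4.096 us * 2^t (assuming the usual IBTA convention): a CM
 * response timeout of 20 is on the order of four seconds, and the MRA
 * service timeout of 24 buys the peer roughly a minute.
 */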
52 | |
53 | static const char * const cma_events[] = { |
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
70 | }; |
71 | |
72 | static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, |
73 | enum ib_gid_type gid_type); |
74 | |
75 | const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) |
76 | { |
77 | size_t index = event; |
78 | |
79 | return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? |
80 | cma_events[index] : "unrecognized event" ; |
81 | } |
82 | EXPORT_SYMBOL(rdma_event_msg); |
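
/*
 * Sketch of a typical consumer (a hypothetical handler, for illustration
 * only): the string form is convenient for tracing without decoding the
 * event enum by hand.
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		pr_debug("cma event %s, status %d\n",
 *			 rdma_event_msg(event->event), event->status);
 *		return 0;
 *	}
 */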
83 | |
84 | const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, |
85 | int reason) |
86 | { |
	if (rdma_ib_or_roce(id->device, id->port_num))
88 | return ibcm_reject_msg(reason); |
89 | |
	if (rdma_protocol_iwarp(id->device, id->port_num))
91 | return iwcm_reject_msg(reason); |
92 | |
93 | WARN_ON_ONCE(1); |
94 | return "unrecognized transport" ; |
95 | } |
96 | EXPORT_SYMBOL(rdma_reject_msg); |
97 | |
98 | /** |
99 | * rdma_is_consumer_reject - return true if the consumer rejected the connect |
100 | * request. |
101 | * @id: Communication identifier that received the REJECT event. |
102 | * @reason: Value returned in the REJECT event status field. |
103 | */ |
104 | static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) |
105 | { |
	if (rdma_ib_or_roce(id->device, id->port_num))
107 | return reason == IB_CM_REJ_CONSUMER_DEFINED; |
108 | |
	if (rdma_protocol_iwarp(id->device, id->port_num))
110 | return reason == -ECONNREFUSED; |
111 | |
112 | WARN_ON_ONCE(1); |
113 | return false; |
114 | } |
115 | |
116 | const void *rdma_consumer_reject_data(struct rdma_cm_id *id, |
117 | struct rdma_cm_event *ev, u8 *data_len) |
118 | { |
119 | const void *p; |
120 | |
	if (rdma_is_consumer_reject(id, ev->status)) {
122 | *data_len = ev->param.conn.private_data_len; |
123 | p = ev->param.conn.private_data; |
124 | } else { |
125 | *data_len = 0; |
126 | p = NULL; |
127 | } |
128 | return p; |
129 | } |
130 | EXPORT_SYMBOL(rdma_consumer_reject_data); |
131 | |
132 | /** |
133 | * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id. |
134 | * @id: Communication Identifier |
135 | */ |
136 | struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id) |
137 | { |
138 | struct rdma_id_private *id_priv; |
139 | |
140 | id_priv = container_of(id, struct rdma_id_private, id); |
141 | if (id->device->node_type == RDMA_NODE_RNIC) |
142 | return id_priv->cm_id.iw; |
143 | return NULL; |
144 | } |
145 | EXPORT_SYMBOL(rdma_iw_cm_id); |
146 | |
147 | /** |
148 | * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack. |
149 | * @res: rdma resource tracking entry pointer |
150 | */ |
151 | struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res) |
152 | { |
153 | struct rdma_id_private *id_priv = |
154 | container_of(res, struct rdma_id_private, res); |
155 | |
156 | return &id_priv->id; |
157 | } |
158 | EXPORT_SYMBOL(rdma_res_to_id); |
159 | |
160 | static int cma_add_one(struct ib_device *device); |
161 | static void cma_remove_one(struct ib_device *device, void *client_data); |
162 | |
163 | static struct ib_client cma_client = { |
164 | .name = "cma" , |
165 | .add = cma_add_one, |
166 | .remove = cma_remove_one |
167 | }; |
168 | |
169 | static struct ib_sa_client sa_client; |
170 | static LIST_HEAD(dev_list); |
171 | static LIST_HEAD(listen_any_list); |
172 | static DEFINE_MUTEX(lock); |
173 | static struct rb_root id_table = RB_ROOT; |
174 | /* Serialize operations of id_table tree */ |
175 | static DEFINE_SPINLOCK(id_table_lock); |
176 | static struct workqueue_struct *cma_wq; |
177 | static unsigned int cma_pernet_id; |
178 | |
179 | struct cma_pernet { |
180 | struct xarray tcp_ps; |
181 | struct xarray udp_ps; |
182 | struct xarray ipoib_ps; |
183 | struct xarray ib_ps; |
184 | }; |
185 | |
186 | static struct cma_pernet *cma_pernet(struct net *net) |
187 | { |
	return net_generic(net, cma_pernet_id);
189 | } |
190 | |
191 | static |
192 | struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps) |
193 | { |
194 | struct cma_pernet *pernet = cma_pernet(net); |
195 | |
196 | switch (ps) { |
197 | case RDMA_PS_TCP: |
198 | return &pernet->tcp_ps; |
199 | case RDMA_PS_UDP: |
200 | return &pernet->udp_ps; |
201 | case RDMA_PS_IPOIB: |
202 | return &pernet->ipoib_ps; |
203 | case RDMA_PS_IB: |
204 | return &pernet->ib_ps; |
205 | default: |
206 | return NULL; |
207 | } |
208 | } |
209 | |
210 | struct id_table_entry { |
211 | struct list_head id_list; |
212 | struct rb_node rb_node; |
213 | }; |
214 | |
215 | struct cma_device { |
216 | struct list_head list; |
217 | struct ib_device *device; |
218 | struct completion comp; |
219 | refcount_t refcount; |
220 | struct list_head id_list; |
221 | enum ib_gid_type *default_gid_type; |
222 | u8 *default_roce_tos; |
223 | }; |
224 | |
225 | struct rdma_bind_list { |
226 | enum rdma_ucm_port_space ps; |
227 | struct hlist_head owners; |
228 | unsigned short port; |
229 | }; |
230 | |
231 | static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps, |
232 | struct rdma_bind_list *bind_list, int snum) |
233 | { |
234 | struct xarray *xa = cma_pernet_xa(net, ps); |
235 | |
	return xa_insert(xa, snum, bind_list, GFP_KERNEL);
237 | } |
238 | |
239 | static struct rdma_bind_list *cma_ps_find(struct net *net, |
240 | enum rdma_ucm_port_space ps, int snum) |
241 | { |
242 | struct xarray *xa = cma_pernet_xa(net, ps); |
243 | |
	return xa_load(xa, snum);
245 | } |
246 | |
247 | static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps, |
248 | int snum) |
249 | { |
250 | struct xarray *xa = cma_pernet_xa(net, ps); |
251 | |
	xa_erase(xa, snum);
253 | } |
254 | |
255 | enum { |
256 | CMA_OPTION_AFONLY, |
257 | }; |
258 | |
259 | void cma_dev_get(struct cma_device *cma_dev) |
260 | { |
	refcount_inc(&cma_dev->refcount);
262 | } |
263 | |
264 | void cma_dev_put(struct cma_device *cma_dev) |
265 | { |
	if (refcount_dec_and_test(&cma_dev->refcount))
267 | complete(&cma_dev->comp); |
268 | } |
269 | |
270 | struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, |
271 | void *cookie) |
272 | { |
273 | struct cma_device *cma_dev; |
274 | struct cma_device *found_cma_dev = NULL; |
275 | |
276 | mutex_lock(&lock); |
277 | |
278 | list_for_each_entry(cma_dev, &dev_list, list) |
279 | if (filter(cma_dev->device, cookie)) { |
280 | found_cma_dev = cma_dev; |
281 | break; |
282 | } |
283 | |
284 | if (found_cma_dev) |
		cma_dev_get(found_cma_dev);
	mutex_unlock(&lock);
287 | return found_cma_dev; |
288 | } |
289 | |
290 | int cma_get_default_gid_type(struct cma_device *cma_dev, |
291 | u32 port) |
292 | { |
	if (!rdma_is_port_valid(cma_dev->device, port))
294 | return -EINVAL; |
295 | |
	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
297 | } |
298 | |
299 | int cma_set_default_gid_type(struct cma_device *cma_dev, |
300 | u32 port, |
301 | enum ib_gid_type default_gid_type) |
302 | { |
303 | unsigned long supported_gids; |
304 | |
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	if (default_gid_type == IB_GID_TYPE_IB &&
	    rdma_protocol_roce_eth_encap(cma_dev->device, port))
		default_gid_type = IB_GID_TYPE_ROCE;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
318 | default_gid_type; |
319 | |
320 | return 0; |
321 | } |
322 | |
323 | int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port) |
324 | { |
	if (!rdma_is_port_valid(cma_dev->device, port))
326 | return -EINVAL; |
327 | |
	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
329 | } |
330 | |
331 | int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port, |
332 | u8 default_roce_tos) |
333 | { |
	if (!rdma_is_port_valid(cma_dev->device, port))
335 | return -EINVAL; |
336 | |
	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
338 | default_roce_tos; |
339 | |
340 | return 0; |
341 | } |
342 | struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) |
343 | { |
344 | return cma_dev->device; |
345 | } |
346 | |
347 | /* |
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in progress,
351 | * and reporting it after the callback completes. |
352 | */ |
353 | |
354 | struct cma_multicast { |
355 | struct rdma_id_private *id_priv; |
356 | union { |
357 | struct ib_sa_multicast *sa_mc; |
358 | struct { |
359 | struct work_struct work; |
360 | struct rdma_cm_event event; |
361 | } iboe_join; |
362 | }; |
363 | struct list_head list; |
364 | void *context; |
365 | struct sockaddr_storage addr; |
366 | u8 join_state; |
367 | }; |
368 | |
369 | struct cma_work { |
370 | struct work_struct work; |
371 | struct rdma_id_private *id; |
372 | enum rdma_cm_state old_state; |
373 | enum rdma_cm_state new_state; |
374 | struct rdma_cm_event event; |
375 | }; |
376 | |
377 | union cma_ip_addr { |
378 | struct in6_addr ip6; |
379 | struct { |
380 | __be32 pad[3]; |
381 | __be32 addr; |
382 | } ip4; |
383 | }; |
384 | |
385 | struct cma_hdr { |
386 | u8 cma_version; |
387 | u8 ip_version; /* IP version: 7:4 */ |
388 | __be16 port; |
389 | union cma_ip_addr src_addr; |
390 | union cma_ip_addr dst_addr; |
391 | }; |
392 | |
393 | #define CMA_VERSION 0x00 |
394 | |
395 | struct cma_req_info { |
396 | struct sockaddr_storage listen_addr_storage; |
397 | struct sockaddr_storage src_addr_storage; |
398 | struct ib_device *device; |
399 | union ib_gid local_gid; |
400 | __be64 service_id; |
401 | int port; |
402 | bool has_gid; |
403 | u16 pkey; |
404 | }; |
405 | |
406 | static int cma_comp_exch(struct rdma_id_private *id_priv, |
407 | enum rdma_cm_state comp, enum rdma_cm_state exch) |
408 | { |
409 | unsigned long flags; |
410 | int ret; |
411 | |
412 | /* |
413 | * The FSM uses a funny double locking where state is protected by both |
414 | * the handler_mutex and the spinlock. State is not allowed to change |
415 | * to/from a handler_mutex protected value without also holding |
416 | * handler_mutex. |
417 | */ |
418 | if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT) |
419 | lockdep_assert_held(&id_priv->handler_mutex); |
420 | |
421 | spin_lock_irqsave(&id_priv->lock, flags); |
422 | if ((ret = (id_priv->state == comp))) |
423 | id_priv->state = exch; |
	spin_unlock_irqrestore(&id_priv->lock, flags);
425 | return ret; |
426 | } |
427 | |
428 | static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr) |
429 | { |
430 | return hdr->ip_version >> 4; |
431 | } |
432 | |
433 | static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) |
434 | { |
435 | hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); |
436 | } |
437 | |
438 | static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) |
439 | { |
440 | return (struct sockaddr *)&id_priv->id.route.addr.src_addr; |
441 | } |
442 | |
443 | static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) |
444 | { |
445 | return (struct sockaddr *)&id_priv->id.route.addr.dst_addr; |
446 | } |
447 | |
448 | static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join) |
449 | { |
450 | struct in_device *in_dev = NULL; |
451 | |
452 | if (ndev) { |
453 | rtnl_lock(); |
		in_dev = __in_dev_get_rtnl(ndev);
455 | if (in_dev) { |
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
462 | } |
463 | rtnl_unlock(); |
464 | } |
465 | return (in_dev) ? 0 : -ENODEV; |
466 | } |
467 | |
468 | static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa, |
469 | struct id_table_entry *entry_b) |
470 | { |
471 | struct rdma_id_private *id_priv = list_first_entry( |
472 | &entry_b->id_list, struct rdma_id_private, id_list_entry); |
473 | int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if; |
474 | struct sockaddr *sb = cma_dst_addr(id_priv); |
475 | |
476 | if (ifindex_a != ifindex_b) |
477 | return (ifindex_a > ifindex_b) ? 1 : -1; |
478 | |
479 | if (sa->sa_family != sb->sa_family) |
480 | return sa->sa_family - sb->sa_family; |
481 | |
482 | if (sa->sa_family == AF_INET && |
483 | __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) { |
		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
			      &((struct sockaddr_in *)sb)->sin_addr,
			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
487 | } |
488 | |
489 | if (sa->sa_family == AF_INET6 && |
490 | __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) { |
		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
				     &((struct sockaddr_in6 *)sb)->sin6_addr);
493 | } |
494 | |
495 | return -1; |
496 | } |
497 | |
498 | static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv) |
499 | { |
500 | struct rb_node **new, *parent = NULL; |
501 | struct id_table_entry *this, *node; |
502 | unsigned long flags; |
503 | int result; |
504 | |
	node = kzalloc(sizeof(*node), GFP_KERNEL);
506 | if (!node) |
507 | return -ENOMEM; |
508 | |
509 | spin_lock_irqsave(&id_table_lock, flags); |
510 | new = &id_table.rb_node; |
511 | while (*new) { |
512 | this = container_of(*new, struct id_table_entry, rb_node); |
		result = compare_netdev_and_ip(
			node_id_priv->id.route.addr.dev_addr.bound_dev_if,
			cma_dst_addr(node_id_priv), this);
516 | |
517 | parent = *new; |
518 | if (result < 0) |
519 | new = &((*new)->rb_left); |
520 | else if (result > 0) |
521 | new = &((*new)->rb_right); |
522 | else { |
			list_add_tail(&node_id_priv->id_list_entry,
				      &this->id_list);
			kfree(node);
526 | goto unlock; |
527 | } |
528 | } |
529 | |
	INIT_LIST_HEAD(&node->id_list);
	list_add_tail(&node_id_priv->id_list_entry, &node->id_list);

	rb_link_node(&node->rb_node, parent, new);
534 | rb_insert_color(&node->rb_node, &id_table); |
535 | |
536 | unlock: |
	spin_unlock_irqrestore(&id_table_lock, flags);
538 | return 0; |
539 | } |
540 | |
541 | static struct id_table_entry * |
542 | node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa) |
543 | { |
544 | struct rb_node *node = root->rb_node; |
545 | struct id_table_entry *data; |
546 | int result; |
547 | |
548 | while (node) { |
549 | data = container_of(node, struct id_table_entry, rb_node); |
		result = compare_netdev_and_ip(ifindex, sa, data);
551 | if (result < 0) |
552 | node = node->rb_left; |
553 | else if (result > 0) |
554 | node = node->rb_right; |
555 | else |
556 | return data; |
557 | } |
558 | |
559 | return NULL; |
560 | } |
561 | |
562 | static void cma_remove_id_from_tree(struct rdma_id_private *id_priv) |
563 | { |
564 | struct id_table_entry *data; |
565 | unsigned long flags; |
566 | |
567 | spin_lock_irqsave(&id_table_lock, flags); |
	if (list_empty(&id_priv->id_list_entry))
569 | goto out; |
570 | |
	data = node_from_ndev_ip(&id_table,
				 id_priv->id.route.addr.dev_addr.bound_dev_if,
				 cma_dst_addr(id_priv));
574 | if (!data) |
575 | goto out; |
576 | |
	list_del_init(&id_priv->id_list_entry);
	if (list_empty(&data->id_list)) {
		rb_erase(&data->rb_node, &id_table);
		kfree(data);
581 | } |
582 | out: |
	spin_unlock_irqrestore(&id_table_lock, flags);
584 | } |
585 | |
586 | static void _cma_attach_to_dev(struct rdma_id_private *id_priv, |
587 | struct cma_device *cma_dev) |
588 | { |
589 | cma_dev_get(cma_dev); |
590 | id_priv->cma_dev = cma_dev; |
591 | id_priv->id.device = cma_dev->device; |
592 | id_priv->id.route.addr.dev_addr.transport = |
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->device_item, &cma_dev->id_list);
595 | |
	trace_cm_id_attach(id_priv, cma_dev->device);
597 | } |
598 | |
599 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, |
600 | struct cma_device *cma_dev) |
601 | { |
602 | _cma_attach_to_dev(id_priv, cma_dev); |
603 | id_priv->gid_type = |
604 | cma_dev->default_gid_type[id_priv->id.port_num - |
					  rdma_start_port(cma_dev->device)];
606 | } |
607 | |
608 | static void cma_release_dev(struct rdma_id_private *id_priv) |
609 | { |
610 | mutex_lock(&lock); |
	list_del_init(&id_priv->device_item);
	cma_dev_put(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	id_priv->id.device = NULL;
	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
	}
	mutex_unlock(&lock);
620 | } |
621 | |
622 | static inline unsigned short cma_family(struct rdma_id_private *id_priv) |
623 | { |
624 | return id_priv->id.route.addr.src_addr.ss_family; |
625 | } |
626 | |
627 | static int cma_set_default_qkey(struct rdma_id_private *id_priv) |
628 | { |
629 | struct ib_sa_mcmember_rec rec; |
630 | int ret = 0; |
631 | |
632 | switch (id_priv->id.ps) { |
633 | case RDMA_PS_UDP: |
634 | case RDMA_PS_IB: |
635 | id_priv->qkey = RDMA_UDP_QKEY; |
636 | break; |
637 | case RDMA_PS_IPOIB: |
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
642 | if (!ret) |
643 | id_priv->qkey = be32_to_cpu(rec.qkey); |
644 | break; |
645 | default: |
646 | break; |
647 | } |
648 | return ret; |
649 | } |
650 | |
651 | static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) |
652 | { |
653 | if (!qkey || |
654 | (id_priv->qkey && (id_priv->qkey != qkey))) |
655 | return -EINVAL; |
656 | |
657 | id_priv->qkey = qkey; |
658 | return 0; |
659 | } |
660 | |
661 | static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) |
662 | { |
663 | dev_addr->dev_type = ARPHRD_INFINIBAND; |
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
665 | ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); |
666 | } |
667 | |
668 | static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) |
669 | { |
670 | int ret; |
671 | |
672 | if (addr->sa_family != AF_IB) { |
673 | ret = rdma_translate_ip(addr, dev_addr); |
674 | } else { |
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
676 | ret = 0; |
677 | } |
678 | |
679 | return ret; |
680 | } |
681 | |
682 | static const struct ib_gid_attr * |
683 | cma_validate_port(struct ib_device *device, u32 port, |
684 | enum ib_gid_type gid_type, |
685 | union ib_gid *gid, |
686 | struct rdma_id_private *id_priv) |
687 | { |
688 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
	const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
690 | int bound_if_index = dev_addr->bound_dev_if; |
691 | int dev_type = dev_addr->dev_type; |
692 | struct net_device *ndev = NULL; |
693 | |
	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
695 | goto out; |
696 | |
	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		goto out;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
701 | goto out; |
702 | |
703 | /* |
704 | * For drivers that do not associate more than one net device with |
705 | * their gid tables, such as iWARP drivers, it is sufficient to |
706 | * return the first table entry. |
707 | * |
708 | * Other driver classes might be included in the future. |
709 | */ |
	if (rdma_protocol_iwarp(device, port)) {
		sgid_attr = rdma_get_gid_attr(device, port, 0);
		if (IS_ERR(sgid_attr))
713 | goto out; |
714 | |
715 | rcu_read_lock(); |
716 | ndev = rcu_dereference(sgid_attr->ndev); |
		if (!net_eq(dev_net(ndev), dev_addr->net) ||
		    ndev->ifindex != bound_if_index)
			sgid_attr = ERR_PTR(-ENODEV);
720 | rcu_read_unlock(); |
721 | goto out; |
722 | } |
723 | |
	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(dev_addr->net, bound_if_index);
726 | if (!ndev) |
727 | goto out; |
728 | } else { |
729 | gid_type = IB_GID_TYPE_IB; |
730 | } |
731 | |
	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
	dev_put(ndev);
734 | out: |
735 | return sgid_attr; |
736 | } |
737 | |
738 | static void cma_bind_sgid_attr(struct rdma_id_private *id_priv, |
739 | const struct ib_gid_attr *sgid_attr) |
740 | { |
741 | WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr); |
742 | id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; |
743 | } |
744 | |
745 | /** |
746 | * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute |
747 | * based on source ip address. |
748 | * @id_priv: cm_id which should be bound to cma device |
749 | * |
750 | * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute |
751 | * based on source IP address. It returns 0 on success or error code otherwise. |
752 | * It is applicable to active and passive side cm_id. |
753 | */ |
754 | static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) |
755 | { |
756 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
757 | const struct ib_gid_attr *sgid_attr; |
758 | union ib_gid gid, iboe_gid, *gidp; |
759 | struct cma_device *cma_dev; |
760 | enum ib_gid_type gid_type; |
761 | int ret = -ENODEV; |
762 | u32 port; |
763 | |
764 | if (dev_addr->dev_type != ARPHRD_INFINIBAND && |
765 | id_priv->id.ps == RDMA_PS_IPOIB) |
766 | return -EINVAL; |
767 | |
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);
770 | |
771 | memcpy(&gid, dev_addr->src_dev_addr + |
772 | rdma_addr_gid_offset(dev_addr), sizeof(gid)); |
773 | |
774 | mutex_lock(&lock); |
775 | list_for_each_entry(cma_dev, &dev_list, list) { |
776 | rdma_for_each_port (cma_dev->device, port) { |
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, gidp, id_priv);
			if (!IS_ERR(sgid_attr)) {
783 | id_priv->id.port_num = port; |
784 | cma_bind_sgid_attr(id_priv, sgid_attr); |
785 | cma_attach_to_dev(id_priv, cma_dev); |
786 | ret = 0; |
787 | goto out; |
788 | } |
789 | } |
790 | } |
791 | out: |
	mutex_unlock(&lock);
793 | return ret; |
794 | } |
795 | |
796 | /** |
797 | * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute |
798 | * @id_priv: cm id to bind to cma device |
799 | * @listen_id_priv: listener cm id to match against |
 * @req: Pointer to req structure containing incoming
 *       request information
 *
802 | * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when |
803 | * rdma device matches for listen_id and incoming request. It also verifies |
804 | * that a GID table entry is present for the source address. |
805 | * Returns 0 on success, or returns error code otherwise. |
806 | */ |
807 | static int cma_ib_acquire_dev(struct rdma_id_private *id_priv, |
808 | const struct rdma_id_private *listen_id_priv, |
809 | struct cma_req_info *req) |
810 | { |
811 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
812 | const struct ib_gid_attr *sgid_attr; |
813 | enum ib_gid_type gid_type; |
814 | union ib_gid gid; |
815 | |
816 | if (dev_addr->dev_type != ARPHRD_INFINIBAND && |
817 | id_priv->id.ps == RDMA_PS_IPOIB) |
818 | return -EINVAL; |
819 | |
	if (rdma_protocol_roce(req->device, req->port))
		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
			    &gid);
823 | else |
824 | memcpy(&gid, dev_addr->src_dev_addr + |
825 | rdma_addr_gid_offset(dev_addr), sizeof(gid)); |
826 | |
827 | gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1]; |
	sgid_attr = cma_validate_port(req->device, req->port,
				      gid_type, &gid, id_priv);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);
832 | |
833 | id_priv->id.port_num = req->port; |
834 | cma_bind_sgid_attr(id_priv, sgid_attr); |
835 | /* Need to acquire lock to protect against reader |
836 | * of cma_dev->id_list such as cma_netdev_callback() and |
837 | * cma_process_remove(). |
838 | */ |
839 | mutex_lock(&lock); |
	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
	mutex_unlock(&lock);
	rdma_restrack_add(&id_priv->res);
843 | return 0; |
844 | } |
845 | |
846 | static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, |
847 | const struct rdma_id_private *listen_id_priv) |
848 | { |
849 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
850 | const struct ib_gid_attr *sgid_attr; |
851 | struct cma_device *cma_dev; |
852 | enum ib_gid_type gid_type; |
853 | int ret = -ENODEV; |
854 | union ib_gid gid; |
855 | u32 port; |
856 | |
857 | if (dev_addr->dev_type != ARPHRD_INFINIBAND && |
858 | id_priv->id.ps == RDMA_PS_IPOIB) |
859 | return -EINVAL; |
860 | |
861 | memcpy(&gid, dev_addr->src_dev_addr + |
862 | rdma_addr_gid_offset(dev_addr), sizeof(gid)); |
863 | |
864 | mutex_lock(&lock); |
865 | |
866 | cma_dev = listen_id_priv->cma_dev; |
867 | port = listen_id_priv->id.port_num; |
868 | gid_type = listen_id_priv->gid_type; |
	sgid_attr = cma_validate_port(cma_dev->device, port,
				      gid_type, &gid, id_priv);
	if (!IS_ERR(sgid_attr)) {
872 | id_priv->id.port_num = port; |
873 | cma_bind_sgid_attr(id_priv, sgid_attr); |
874 | ret = 0; |
875 | goto out; |
876 | } |
877 | |
878 | list_for_each_entry(cma_dev, &dev_list, list) { |
879 | rdma_for_each_port (cma_dev->device, port) { |
880 | if (listen_id_priv->cma_dev == cma_dev && |
881 | listen_id_priv->id.port_num == port) |
882 | continue; |
883 | |
884 | gid_type = cma_dev->default_gid_type[port - 1]; |
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, &gid, id_priv);
			if (!IS_ERR(sgid_attr)) {
888 | id_priv->id.port_num = port; |
889 | cma_bind_sgid_attr(id_priv, sgid_attr); |
890 | ret = 0; |
891 | goto out; |
892 | } |
893 | } |
894 | } |
895 | |
896 | out: |
897 | if (!ret) { |
898 | cma_attach_to_dev(id_priv, cma_dev); |
		rdma_restrack_add(&id_priv->res);
900 | } |
901 | |
	mutex_unlock(&lock);
903 | return ret; |
904 | } |
905 | |
906 | /* |
907 | * Select the source IB device and address to reach the destination IB address. |
908 | */ |
909 | static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) |
910 | { |
911 | struct cma_device *cma_dev, *cur_dev; |
912 | struct sockaddr_ib *addr; |
913 | union ib_gid gid, sgid, *dgid; |
914 | unsigned int p; |
915 | u16 pkey, index; |
916 | enum ib_port_state port_state; |
917 | int ret; |
918 | int i; |
919 | |
920 | cma_dev = NULL; |
921 | addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); |
922 | dgid = (union ib_gid *) &addr->sib_addr; |
923 | pkey = ntohs(addr->sib_pkey); |
924 | |
925 | mutex_lock(&lock); |
926 | list_for_each_entry(cur_dev, &dev_list, list) { |
927 | rdma_for_each_port (cur_dev->device, p) { |
			if (!rdma_cap_af_ib(cur_dev->device, p))
929 | continue; |
930 | |
			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
932 | continue; |
933 | |
			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
935 | continue; |
936 | |
937 | for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; |
938 | ++i) { |
				ret = rdma_query_gid(cur_dev->device, p, i,
						     &gid);
941 | if (ret) |
942 | continue; |
943 | |
				if (!memcmp(&gid, dgid, sizeof(gid))) {
945 | cma_dev = cur_dev; |
946 | sgid = gid; |
947 | id_priv->id.port_num = p; |
948 | goto found; |
949 | } |
950 | |
951 | if (!cma_dev && (gid.global.subnet_prefix == |
952 | dgid->global.subnet_prefix) && |
953 | port_state == IB_PORT_ACTIVE) { |
954 | cma_dev = cur_dev; |
955 | sgid = gid; |
956 | id_priv->id.port_num = p; |
957 | goto found; |
958 | } |
959 | } |
960 | } |
961 | } |
	mutex_unlock(&lock);
963 | return -ENODEV; |
964 | |
965 | found: |
966 | cma_attach_to_dev(id_priv, cma_dev); |
	rdma_restrack_add(&id_priv->res);
	mutex_unlock(&lock);
969 | addr = (struct sockaddr_ib *)cma_src_addr(id_priv); |
970 | memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); |
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
972 | return 0; |
973 | } |
974 | |
975 | static void cma_id_get(struct rdma_id_private *id_priv) |
976 | { |
	refcount_inc(&id_priv->refcount);
978 | } |
979 | |
980 | static void cma_id_put(struct rdma_id_private *id_priv) |
981 | { |
	if (refcount_dec_and_test(&id_priv->refcount))
983 | complete(&id_priv->comp); |
984 | } |
985 | |
986 | static struct rdma_id_private * |
987 | __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, |
988 | void *context, enum rdma_ucm_port_space ps, |
989 | enum ib_qp_type qp_type, const struct rdma_id_private *parent) |
990 | { |
991 | struct rdma_id_private *id_priv; |
992 | |
	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);
996 | |
997 | id_priv->state = RDMA_CM_IDLE; |
998 | id_priv->id.context = context; |
999 | id_priv->id.event_handler = event_handler; |
1000 | id_priv->id.ps = ps; |
1001 | id_priv->id.qp_type = qp_type; |
1002 | id_priv->tos_set = false; |
1003 | id_priv->timeout_set = false; |
1004 | id_priv->min_rnr_timer_set = false; |
1005 | id_priv->gid_type = IB_GID_TYPE_IB; |
1006 | spin_lock_init(&id_priv->lock); |
1007 | mutex_init(&id_priv->qp_mutex); |
	init_completion(&id_priv->comp);
	refcount_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->device_item);
	INIT_LIST_HEAD(&id_priv->id_list_entry);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
1016 | id_priv->id.route.addr.dev_addr.net = get_net(net); |
1017 | id_priv->seq_num &= 0x00ffffff; |
1018 | |
	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
	if (parent)
		rdma_restrack_parent_name(&id_priv->res, &parent->res);
1022 | |
1023 | return id_priv; |
1024 | } |
1025 | |
1026 | struct rdma_cm_id * |
1027 | __rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, |
1028 | void *context, enum rdma_ucm_port_space ps, |
1029 | enum ib_qp_type qp_type, const char *caller) |
1030 | { |
1031 | struct rdma_id_private *ret; |
1032 | |
1033 | ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL); |
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, caller);
1038 | return &ret->id; |
1039 | } |
1040 | EXPORT_SYMBOL(__rdma_create_kernel_id); |
1041 | |
1042 | struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, |
1043 | void *context, |
1044 | enum rdma_ucm_port_space ps, |
1045 | enum ib_qp_type qp_type) |
1046 | { |
1047 | struct rdma_id_private *ret; |
1048 | |
1049 | ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context, |
1050 | ps, qp_type, NULL); |
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, NULL);
1055 | return &ret->id; |
1056 | } |
1057 | EXPORT_SYMBOL(rdma_create_user_id); |
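
/*
 * UD QPs carry no per-connection state to negotiate, so the QP can be
 * walked straight through INIT -> RTR -> RTS here; only the INIT
 * attributes come from rdma_init_qp_attr().
 */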
1058 | |
1059 | static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) |
1060 | { |
1061 | struct ib_qp_attr qp_attr; |
1062 | int qp_attr_mask, ret; |
1063 | |
1064 | qp_attr.qp_state = IB_QPS_INIT; |
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1066 | if (ret) |
1067 | return ret; |
1068 | |
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1070 | if (ret) |
1071 | return ret; |
1072 | |
1073 | qp_attr.qp_state = IB_QPS_RTR; |
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
1075 | if (ret) |
1076 | return ret; |
1077 | |
1078 | qp_attr.qp_state = IB_QPS_RTS; |
1079 | qp_attr.sq_psn = 0; |
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
1081 | |
1082 | return ret; |
1083 | } |
1084 | |
1085 | static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) |
1086 | { |
1087 | struct ib_qp_attr qp_attr; |
1088 | int qp_attr_mask, ret; |
1089 | |
1090 | qp_attr.qp_state = IB_QPS_INIT; |
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1092 | if (ret) |
1093 | return ret; |
1094 | |
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1096 | } |
1097 | |
1098 | int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, |
1099 | struct ib_qp_init_attr *qp_init_attr) |
1100 | { |
1101 | struct rdma_id_private *id_priv; |
1102 | struct ib_qp *qp; |
1103 | int ret; |
1104 | |
1105 | id_priv = container_of(id, struct rdma_id_private, id); |
1106 | if (id->device != pd->device) { |
1107 | ret = -EINVAL; |
1108 | goto out_err; |
1109 | } |
1110 | |
1111 | qp_init_attr->port_num = id->port_num; |
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
1115 | goto out_err; |
1116 | } |
1117 | |
1118 | if (id->qp_type == IB_QPT_UD) |
1119 | ret = cma_init_ud_qp(id_priv, qp); |
1120 | else |
1121 | ret = cma_init_conn_qp(id_priv, qp); |
1122 | if (ret) |
1123 | goto out_destroy; |
1124 | |
1125 | id->qp = qp; |
1126 | id_priv->qp_num = qp->qp_num; |
1127 | id_priv->srq = (qp->srq != NULL); |
	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
1129 | return 0; |
1130 | out_destroy: |
1131 | ib_destroy_qp(qp); |
1132 | out_err: |
	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
1134 | return ret; |
1135 | } |
1136 | EXPORT_SYMBOL(rdma_create_qp); |
1137 | |
1138 | void rdma_destroy_qp(struct rdma_cm_id *id) |
1139 | { |
1140 | struct rdma_id_private *id_priv; |
1141 | |
1142 | id_priv = container_of(id, struct rdma_id_private, id); |
1143 | trace_cm_qp_destroy(id_priv); |
1144 | mutex_lock(&id_priv->qp_mutex); |
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
1148 | } |
1149 | EXPORT_SYMBOL(rdma_destroy_qp); |
1150 | |
1151 | static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, |
1152 | struct rdma_conn_param *conn_param) |
1153 | { |
1154 | struct ib_qp_attr qp_attr; |
1155 | int qp_attr_mask, ret; |
1156 | |
1157 | mutex_lock(&id_priv->qp_mutex); |
1158 | if (!id_priv->id.qp) { |
1159 | ret = 0; |
1160 | goto out; |
1161 | } |
1162 | |
1163 | /* Need to update QP attributes from default values. */ |
1164 | qp_attr.qp_state = IB_QPS_INIT; |
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1166 | if (ret) |
1167 | goto out; |
1168 | |
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1170 | if (ret) |
1171 | goto out; |
1172 | |
1173 | qp_attr.qp_state = IB_QPS_RTR; |
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1175 | if (ret) |
1176 | goto out; |
1177 | |
1178 | BUG_ON(id_priv->cma_dev->device != id_priv->id.device); |
1179 | |
1180 | if (conn_param) |
1181 | qp_attr.max_dest_rd_atomic = conn_param->responder_resources; |
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
1185 | return ret; |
1186 | } |
1187 | |
1188 | static int cma_modify_qp_rts(struct rdma_id_private *id_priv, |
1189 | struct rdma_conn_param *conn_param) |
1190 | { |
1191 | struct ib_qp_attr qp_attr; |
1192 | int qp_attr_mask, ret; |
1193 | |
1194 | mutex_lock(&id_priv->qp_mutex); |
1195 | if (!id_priv->id.qp) { |
1196 | ret = 0; |
1197 | goto out; |
1198 | } |
1199 | |
1200 | qp_attr.qp_state = IB_QPS_RTS; |
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1202 | if (ret) |
1203 | goto out; |
1204 | |
1205 | if (conn_param) |
1206 | qp_attr.max_rd_atomic = conn_param->initiator_depth; |
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
1210 | return ret; |
1211 | } |
1212 | |
1213 | static int cma_modify_qp_err(struct rdma_id_private *id_priv) |
1214 | { |
1215 | struct ib_qp_attr qp_attr; |
1216 | int ret; |
1217 | |
1218 | mutex_lock(&id_priv->qp_mutex); |
1219 | if (!id_priv->id.qp) { |
1220 | ret = 0; |
1221 | goto out; |
1222 | } |
1223 | |
1224 | qp_attr.qp_state = IB_QPS_ERR; |
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
1228 | return ret; |
1229 | } |
1230 | |
1231 | static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, |
1232 | struct ib_qp_attr *qp_attr, int *qp_attr_mask) |
1233 | { |
1234 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
1235 | int ret; |
1236 | u16 pkey; |
1237 | |
	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
1239 | pkey = 0xffff; |
1240 | else |
1241 | pkey = ib_addr_get_pkey(dev_addr); |
1242 | |
	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
1245 | if (ret) |
1246 | return ret; |
1247 | |
1248 | qp_attr->port_num = id_priv->id.port_num; |
1249 | *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; |
1250 | |
1251 | if (id_priv->id.qp_type == IB_QPT_UD) { |
1252 | ret = cma_set_default_qkey(id_priv); |
1253 | if (ret) |
1254 | return ret; |
1255 | |
1256 | qp_attr->qkey = id_priv->qkey; |
1257 | *qp_attr_mask |= IB_QP_QKEY; |
1258 | } else { |
1259 | qp_attr->qp_access_flags = 0; |
1260 | *qp_attr_mask |= IB_QP_ACCESS_FLAGS; |
1261 | } |
1262 | return 0; |
1263 | } |
1264 | |
1265 | int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, |
1266 | int *qp_attr_mask) |
1267 | { |
1268 | struct rdma_id_private *id_priv; |
1269 | int ret = 0; |
1270 | |
1271 | id_priv = container_of(id, struct rdma_id_private, id); |
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
1273 | if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) |
1274 | ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); |
1275 | else |
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
1278 | |
1279 | if (qp_attr->qp_state == IB_QPS_RTR) |
1280 | qp_attr->rq_psn = id_priv->seq_num; |
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
1282 | if (!id_priv->cm_id.iw) { |
1283 | qp_attr->qp_access_flags = 0; |
1284 | *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; |
1285 | } else |
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
1288 | qp_attr->port_num = id_priv->id.port_num; |
1289 | *qp_attr_mask |= IB_QP_PORT; |
1290 | } else { |
1291 | ret = -ENOSYS; |
1292 | } |
1293 | |
1294 | if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set) |
1295 | qp_attr->timeout = id_priv->timeout; |
1296 | |
1297 | if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set) |
1298 | qp_attr->min_rnr_timer = id_priv->min_rnr_timer; |
1299 | |
1300 | return ret; |
1301 | } |
1302 | EXPORT_SYMBOL(rdma_init_qp_attr); |
1303 | |
1304 | static inline bool cma_zero_addr(const struct sockaddr *addr) |
1305 | { |
1306 | switch (addr->sa_family) { |
1307 | case AF_INET: |
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
1309 | case AF_INET6: |
		return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
1311 | case AF_IB: |
		return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
1313 | default: |
1314 | return false; |
1315 | } |
1316 | } |
1317 | |
1318 | static inline bool cma_loopback_addr(const struct sockaddr *addr) |
1319 | { |
1320 | switch (addr->sa_family) { |
1321 | case AF_INET: |
		return ipv4_is_loopback(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
1324 | case AF_INET6: |
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *)addr)->sin6_addr);
1327 | case AF_IB: |
		return ib_addr_loopback(
			&((struct sockaddr_ib *)addr)->sib_addr);
1330 | default: |
1331 | return false; |
1332 | } |
1333 | } |
1334 | |
1335 | static inline bool cma_any_addr(const struct sockaddr *addr) |
1336 | { |
1337 | return cma_zero_addr(addr) || cma_loopback_addr(addr); |
1338 | } |
1339 | |
1340 | static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst) |
1341 | { |
1342 | if (src->sa_family != dst->sa_family) |
1343 | return -1; |
1344 | |
1345 | switch (src->sa_family) { |
1346 | case AF_INET: |
1347 | return ((struct sockaddr_in *)src)->sin_addr.s_addr != |
1348 | ((struct sockaddr_in *)dst)->sin_addr.s_addr; |
1349 | case AF_INET6: { |
1350 | struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src; |
1351 | struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst; |
1352 | bool link_local; |
1353 | |
		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
				  &dst_addr6->sin6_addr))
1356 | return 1; |
1357 | link_local = ipv6_addr_type(addr: &dst_addr6->sin6_addr) & |
1358 | IPV6_ADDR_LINKLOCAL; |
1359 | /* Link local must match their scope_ids */ |
1360 | return link_local ? (src_addr6->sin6_scope_id != |
1361 | dst_addr6->sin6_scope_id) : |
1362 | 0; |
1363 | } |
1364 | |
1365 | default: |
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
1368 | } |
1369 | } |
1370 | |
1371 | static __be16 cma_port(const struct sockaddr *addr) |
1372 | { |
1373 | struct sockaddr_ib *sib; |
1374 | |
1375 | switch (addr->sa_family) { |
1376 | case AF_INET: |
1377 | return ((struct sockaddr_in *) addr)->sin_port; |
1378 | case AF_INET6: |
1379 | return ((struct sockaddr_in6 *) addr)->sin6_port; |
1380 | case AF_IB: |
1381 | sib = (struct sockaddr_ib *) addr; |
1382 | return htons((u16) (be64_to_cpu(sib->sib_sid) & |
1383 | be64_to_cpu(sib->sib_sid_mask))); |
1384 | default: |
1385 | return 0; |
1386 | } |
1387 | } |
1388 | |
1389 | static inline int cma_any_port(const struct sockaddr *addr) |
1390 | { |
1391 | return !cma_port(addr); |
1392 | } |
1393 | |
1394 | static void cma_save_ib_info(struct sockaddr *src_addr, |
1395 | struct sockaddr *dst_addr, |
1396 | const struct rdma_cm_id *listen_id, |
1397 | const struct sa_path_rec *path) |
1398 | { |
1399 | struct sockaddr_ib *listen_ib, *ib; |
1400 | |
1401 | listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; |
1402 | if (src_addr) { |
1403 | ib = (struct sockaddr_ib *)src_addr; |
1404 | ib->sib_family = AF_IB; |
1405 | if (path) { |
1406 | ib->sib_pkey = path->pkey; |
1407 | ib->sib_flowinfo = path->flow_label; |
1408 | memcpy(&ib->sib_addr, &path->sgid, 16); |
1409 | ib->sib_sid = path->service_id; |
1410 | ib->sib_scope_id = 0; |
1411 | } else { |
1412 | ib->sib_pkey = listen_ib->sib_pkey; |
1413 | ib->sib_flowinfo = listen_ib->sib_flowinfo; |
1414 | ib->sib_addr = listen_ib->sib_addr; |
1415 | ib->sib_sid = listen_ib->sib_sid; |
1416 | ib->sib_scope_id = listen_ib->sib_scope_id; |
1417 | } |
1418 | ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); |
1419 | } |
1420 | if (dst_addr) { |
1421 | ib = (struct sockaddr_ib *)dst_addr; |
1422 | ib->sib_family = AF_IB; |
1423 | if (path) { |
1424 | ib->sib_pkey = path->pkey; |
1425 | ib->sib_flowinfo = path->flow_label; |
1426 | memcpy(&ib->sib_addr, &path->dgid, 16); |
1427 | } |
1428 | } |
1429 | } |
1430 | |
1431 | static void cma_save_ip4_info(struct sockaddr_in *src_addr, |
1432 | struct sockaddr_in *dst_addr, |
1433 | struct cma_hdr *hdr, |
1434 | __be16 local_port) |
1435 | { |
1436 | if (src_addr) { |
1437 | *src_addr = (struct sockaddr_in) { |
1438 | .sin_family = AF_INET, |
1439 | .sin_addr.s_addr = hdr->dst_addr.ip4.addr, |
1440 | .sin_port = local_port, |
1441 | }; |
1442 | } |
1443 | |
1444 | if (dst_addr) { |
1445 | *dst_addr = (struct sockaddr_in) { |
1446 | .sin_family = AF_INET, |
1447 | .sin_addr.s_addr = hdr->src_addr.ip4.addr, |
1448 | .sin_port = hdr->port, |
1449 | }; |
1450 | } |
1451 | } |
1452 | |
1453 | static void cma_save_ip6_info(struct sockaddr_in6 *src_addr, |
1454 | struct sockaddr_in6 *dst_addr, |
1455 | struct cma_hdr *hdr, |
1456 | __be16 local_port) |
1457 | { |
1458 | if (src_addr) { |
1459 | *src_addr = (struct sockaddr_in6) { |
1460 | .sin6_family = AF_INET6, |
1461 | .sin6_addr = hdr->dst_addr.ip6, |
1462 | .sin6_port = local_port, |
1463 | }; |
1464 | } |
1465 | |
1466 | if (dst_addr) { |
1467 | *dst_addr = (struct sockaddr_in6) { |
1468 | .sin6_family = AF_INET6, |
1469 | .sin6_addr = hdr->src_addr.ip6, |
1470 | .sin6_port = hdr->port, |
1471 | }; |
1472 | } |
1473 | } |
1474 | |
1475 | static u16 cma_port_from_service_id(__be64 service_id) |
1476 | { |
1477 | return (u16)be64_to_cpu(service_id); |
1478 | } |
1479 | |
1480 | static int cma_save_ip_info(struct sockaddr *src_addr, |
1481 | struct sockaddr *dst_addr, |
1482 | const struct ib_cm_event *ib_event, |
1483 | __be64 service_id) |
1484 | { |
1485 | struct cma_hdr *hdr; |
1486 | __be16 port; |
1487 | |
1488 | hdr = ib_event->private_data; |
1489 | if (hdr->cma_version != CMA_VERSION) |
1490 | return -EINVAL; |
1491 | |
1492 | port = htons(cma_port_from_service_id(service_id)); |
1493 | |
1494 | switch (cma_get_ip_ver(hdr)) { |
1495 | case 4: |
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
1498 | break; |
1499 | case 6: |
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
1502 | break; |
1503 | default: |
1504 | return -EAFNOSUPPORT; |
1505 | } |
1506 | |
1507 | return 0; |
1508 | } |
1509 | |
1510 | static int cma_save_net_info(struct sockaddr *src_addr, |
1511 | struct sockaddr *dst_addr, |
1512 | const struct rdma_cm_id *listen_id, |
1513 | const struct ib_cm_event *ib_event, |
1514 | sa_family_t sa_family, __be64 service_id) |
1515 | { |
1516 | if (sa_family == AF_IB) { |
1517 | if (ib_event->event == IB_CM_REQ_RECEIVED) |
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
1520 | else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) |
1521 | cma_save_ib_info(src_addr, dst_addr, listen_id, NULL); |
1522 | return 0; |
1523 | } |
1524 | |
1525 | return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id); |
1526 | } |
1527 | |
1528 | static int cma_save_req_info(const struct ib_cm_event *ib_event, |
1529 | struct cma_req_info *req) |
1530 | { |
1531 | const struct ib_cm_req_event_param *req_param = |
1532 | &ib_event->param.req_rcvd; |
1533 | const struct ib_cm_sidr_req_event_param *sidr_param = |
1534 | &ib_event->param.sidr_req_rcvd; |
1535 | |
1536 | switch (ib_event->event) { |
1537 | case IB_CM_REQ_RECEIVED: |
1538 | req->device = req_param->listen_id->device; |
1539 | req->port = req_param->port; |
1540 | memcpy(&req->local_gid, &req_param->primary_path->sgid, |
1541 | sizeof(req->local_gid)); |
1542 | req->has_gid = true; |
1543 | req->service_id = req_param->primary_path->service_id; |
1544 | req->pkey = be16_to_cpu(req_param->primary_path->pkey); |
1545 | if (req->pkey != req_param->bth_pkey) |
1546 | pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" |
1547 | "RDMA CMA: in the future this may cause the request to be dropped\n" , |
1548 | req_param->bth_pkey, req->pkey); |
1549 | break; |
1550 | case IB_CM_SIDR_REQ_RECEIVED: |
1551 | req->device = sidr_param->listen_id->device; |
1552 | req->port = sidr_param->port; |
1553 | req->has_gid = false; |
1554 | req->service_id = sidr_param->service_id; |
1555 | req->pkey = sidr_param->pkey; |
1556 | if (req->pkey != sidr_param->bth_pkey) |
1557 | pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n" |
1558 | "RDMA CMA: in the future this may cause the request to be dropped\n" , |
1559 | sidr_param->bth_pkey, req->pkey); |
1560 | break; |
1561 | default: |
1562 | return -EINVAL; |
1563 | } |
1564 | |
1565 | return 0; |
1566 | } |
1567 | |
1568 | static bool validate_ipv4_net_dev(struct net_device *net_dev, |
1569 | const struct sockaddr_in *dst_addr, |
1570 | const struct sockaddr_in *src_addr) |
1571 | { |
1572 | __be32 daddr = dst_addr->sin_addr.s_addr, |
1573 | saddr = src_addr->sin_addr.s_addr; |
1574 | struct fib_result res; |
1575 | struct flowi4 fl4; |
1576 | int err; |
1577 | bool ret; |
1578 | |
	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))
1583 | return false; |
1584 | |
1585 | memset(&fl4, 0, sizeof(fl4)); |
1586 | fl4.flowi4_oif = net_dev->ifindex; |
1587 | fl4.daddr = daddr; |
1588 | fl4.saddr = saddr; |
1589 | |
1590 | rcu_read_lock(); |
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1592 | ret = err == 0 && FIB_RES_DEV(res) == net_dev; |
1593 | rcu_read_unlock(); |
1594 | |
1595 | return ret; |
1596 | } |
1597 | |
1598 | static bool validate_ipv6_net_dev(struct net_device *net_dev, |
1599 | const struct sockaddr_in6 *dst_addr, |
1600 | const struct sockaddr_in6 *src_addr) |
1601 | { |
1602 | #if IS_ENABLED(CONFIG_IPV6) |
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
			   IPV6_ADDR_LINKLOCAL;
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
					 NULL, strict);
1608 | bool ret; |
1609 | |
1610 | if (!rt) |
1611 | return false; |
1612 | |
1613 | ret = rt->rt6i_idev->dev == net_dev; |
1614 | ip6_rt_put(rt); |
1615 | |
1616 | return ret; |
1617 | #else |
1618 | return false; |
1619 | #endif |
1620 | } |
1621 | |
1622 | static bool validate_net_dev(struct net_device *net_dev, |
1623 | const struct sockaddr *daddr, |
1624 | const struct sockaddr *saddr) |
1625 | { |
1626 | const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr; |
1627 | const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr; |
1628 | const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; |
1629 | const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr; |
1630 | |
1631 | switch (daddr->sa_family) { |
1632 | case AF_INET: |
1633 | return saddr->sa_family == AF_INET && |
	       validate_ipv4_net_dev(net_dev, daddr4, saddr4);
1635 | |
1636 | case AF_INET6: |
1637 | return saddr->sa_family == AF_INET6 && |
	       validate_ipv6_net_dev(net_dev, daddr6, saddr6);
1639 | |
1640 | default: |
1641 | return false; |
1642 | } |
1643 | } |
1644 | |
1645 | static struct net_device * |
1646 | roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event) |
1647 | { |
1648 | const struct ib_gid_attr *sgid_attr = NULL; |
1649 | struct net_device *ndev; |
1650 | |
1651 | if (ib_event->event == IB_CM_REQ_RECEIVED) |
1652 | sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr; |
1653 | else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) |
1654 | sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr; |
1655 | |
1656 | if (!sgid_attr) |
1657 | return NULL; |
1658 | |
1659 | rcu_read_lock(); |
	ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
	if (IS_ERR(ndev))
		ndev = NULL;
	else
		dev_hold(ndev);
1665 | rcu_read_unlock(); |
1666 | return ndev; |
1667 | } |
1668 | |
1669 | static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event, |
1670 | struct cma_req_info *req) |
1671 | { |
1672 | struct sockaddr *listen_addr = |
1673 | (struct sockaddr *)&req->listen_addr_storage; |
1674 | struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; |
1675 | struct net_device *net_dev; |
1676 | const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; |
1677 | int err; |
1678 | |
	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
			       req->service_id);
	if (err)
		return ERR_PTR(err);

	if (rdma_protocol_roce(req->device, req->port))
		net_dev = roce_get_net_dev_by_cm_event(ib_event);
	else
		net_dev = ib_get_net_dev_by_params(req->device, req->port,
						   req->pkey,
						   gid, listen_addr);
	if (!net_dev)
		return ERR_PTR(-ENODEV);
1692 | |
1693 | return net_dev; |
1694 | } |
1695 | |
1696 | static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id) |
1697 | { |
1698 | return (be64_to_cpu(service_id) >> 16) & 0xffff; |
1699 | } |
1700 | |
1701 | static bool cma_match_private_data(struct rdma_id_private *id_priv, |
1702 | const struct cma_hdr *hdr) |
1703 | { |
1704 | struct sockaddr *addr = cma_src_addr(id_priv); |
1705 | __be32 ip4_addr; |
1706 | struct in6_addr ip6_addr; |
1707 | |
1708 | if (cma_any_addr(addr) && !id_priv->afonly) |
1709 | return true; |
1710 | |
1711 | switch (addr->sa_family) { |
1712 | case AF_INET: |
1713 | ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr; |
1714 | if (cma_get_ip_ver(hdr) != 4) |
1715 | return false; |
1716 | if (!cma_any_addr(addr) && |
1717 | hdr->dst_addr.ip4.addr != ip4_addr) |
1718 | return false; |
1719 | break; |
1720 | case AF_INET6: |
1721 | ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr; |
1722 | if (cma_get_ip_ver(hdr) != 6) |
1723 | return false; |
1724 | if (!cma_any_addr(addr) && |
1725 | memcmp(p: &hdr->dst_addr.ip6, q: &ip6_addr, size: sizeof(ip6_addr))) |
1726 | return false; |
1727 | break; |
1728 | case AF_IB: |
1729 | return true; |
1730 | default: |
1731 | return false; |
1732 | } |
1733 | |
1734 | return true; |
1735 | } |
1736 | |
1737 | static bool cma_protocol_roce(const struct rdma_cm_id *id) |
1738 | { |
1739 | struct ib_device *device = id->device; |
1740 | const u32 port_num = id->port_num ?: rdma_start_port(device); |
1741 | |
1742 | return rdma_protocol_roce(device, port_num); |
1743 | } |
1744 | |
1745 | static bool cma_is_req_ipv6_ll(const struct cma_req_info *req) |
1746 | { |
1747 | const struct sockaddr *daddr = |
1748 | (const struct sockaddr *)&req->listen_addr_storage; |
1749 | const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; |
1750 | |
1751 | /* Returns true if the req is for IPv6 link local */ |
1752 | return (daddr->sa_family == AF_INET6 && |
1753 | (ipv6_addr_type(addr: &daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)); |
1754 | } |
1755 | |
1756 | static bool cma_match_net_dev(const struct rdma_cm_id *id, |
1757 | const struct net_device *net_dev, |
1758 | const struct cma_req_info *req) |
1759 | { |
1760 | const struct rdma_addr *addr = &id->route.addr; |
1761 | |
1762 | if (!net_dev) |
1763 | /* This request is an AF_IB request */ |
1764 | return (!id->port_num || id->port_num == req->port) && |
1765 | (addr->src_addr.ss_family == AF_IB); |
1766 | |
1767 | /* |
1768 | 	 * If the request is not for IPv6 link-local, allow matching the
1769 | 	 * request to any netdevice of the single- or multi-port rdma device.
1770 | */ |
1771 | if (!cma_is_req_ipv6_ll(req)) |
1772 | return true; |
1773 | /* |
1774 | 	 * Net namespaces must match, and if the listener is listening
1775 | 	 * on a specific netdevice then the netdevice must match as well.
1776 | */ |
1777 | if (net_eq(net1: dev_net(dev: net_dev), net2: addr->dev_addr.net) && |
1778 | (!!addr->dev_addr.bound_dev_if == |
1779 | (addr->dev_addr.bound_dev_if == net_dev->ifindex))) |
1780 | return true; |
1781 | else |
1782 | return false; |
1783 | } |
1784 | |
1785 | static struct rdma_id_private *cma_find_listener( |
1786 | const struct rdma_bind_list *bind_list, |
1787 | const struct ib_cm_id *cm_id, |
1788 | const struct ib_cm_event *ib_event, |
1789 | const struct cma_req_info *req, |
1790 | const struct net_device *net_dev) |
1791 | { |
1792 | struct rdma_id_private *id_priv, *id_priv_dev; |
1793 | |
1794 | lockdep_assert_held(&lock); |
1795 | |
1796 | if (!bind_list) |
1797 | return ERR_PTR(error: -EINVAL); |
1798 | |
1799 | hlist_for_each_entry(id_priv, &bind_list->owners, node) { |
1800 | if (cma_match_private_data(id_priv, hdr: ib_event->private_data)) { |
1801 | if (id_priv->id.device == cm_id->device && |
1802 | cma_match_net_dev(id: &id_priv->id, net_dev, req)) |
1803 | return id_priv; |
1804 | list_for_each_entry(id_priv_dev, |
1805 | &id_priv->listen_list, |
1806 | listen_item) { |
1807 | if (id_priv_dev->id.device == cm_id->device && |
1808 | cma_match_net_dev(id: &id_priv_dev->id, |
1809 | net_dev, req)) |
1810 | return id_priv_dev; |
1811 | } |
1812 | } |
1813 | } |
1814 | |
1815 | return ERR_PTR(error: -EINVAL); |
1816 | } |
1817 | |
1818 | static struct rdma_id_private * |
1819 | cma_ib_id_from_event(struct ib_cm_id *cm_id, |
1820 | const struct ib_cm_event *ib_event, |
1821 | struct cma_req_info *req, |
1822 | struct net_device **net_dev) |
1823 | { |
1824 | struct rdma_bind_list *bind_list; |
1825 | struct rdma_id_private *id_priv; |
1826 | int err; |
1827 | |
1828 | err = cma_save_req_info(ib_event, req); |
1829 | if (err) |
1830 | return ERR_PTR(error: err); |
1831 | |
1832 | *net_dev = cma_get_net_dev(ib_event, req); |
1833 | if (IS_ERR(ptr: *net_dev)) { |
1834 | if (PTR_ERR(ptr: *net_dev) == -EAFNOSUPPORT) { |
1835 | /* Assuming the protocol is AF_IB */ |
1836 | *net_dev = NULL; |
1837 | } else { |
1838 | return ERR_CAST(ptr: *net_dev); |
1839 | } |
1840 | } |
1841 | |
1842 | mutex_lock(&lock); |
1843 | /* |
1844 | 	 * The net namespace might be getting deleted while the route and
1845 | 	 * cm_id lookups are in progress. Therefore, perform the netdevice
1846 | 	 * validation and the cm_id lookup under the RCU read lock.
1847 | 	 * The RCU lock, together with the netdevice state check,
1848 | 	 * synchronizes with a netdevice migrating to a different net
1849 | 	 * namespace and also ensures the net namespace is not deleted
1850 | 	 * while the lookup is in progress.
1851 | 	 * If the device state is not IFF_UP, its properties such as ifindex
1852 | 	 * and nd_net cannot be trusted to remain valid without the RCU lock.
1853 | 	 * change_net_namespace() in net/core/dev.c synchronizes with
1854 | 	 * ongoing operations on the net device after the device is closed,
1855 | 	 * using synchronize_net().
1856 | */ |
1857 | rcu_read_lock(); |
1858 | if (*net_dev) { |
1859 | /* |
1860 | 		 * If the netdevice is down, it is likely that it is administratively
1861 | 		 * down or it might be migrating to a different namespace.
1862 | * In that case avoid further processing, as the net namespace |
1863 | * or ifindex may change. |
1864 | */ |
1865 | if (((*net_dev)->flags & IFF_UP) == 0) { |
1866 | id_priv = ERR_PTR(error: -EHOSTUNREACH); |
1867 | goto err; |
1868 | } |
1869 | |
1870 | if (!validate_net_dev(net_dev: *net_dev, |
1871 | daddr: (struct sockaddr *)&req->src_addr_storage, |
1872 | saddr: (struct sockaddr *)&req->listen_addr_storage)) { |
1873 | id_priv = ERR_PTR(error: -EHOSTUNREACH); |
1874 | goto err; |
1875 | } |
1876 | } |
1877 | |
1878 | bind_list = cma_ps_find(net: *net_dev ? dev_net(dev: *net_dev) : &init_net, |
1879 | ps: rdma_ps_from_service_id(service_id: req->service_id), |
1880 | snum: cma_port_from_service_id(service_id: req->service_id)); |
1881 | id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, net_dev: *net_dev); |
1882 | err: |
1883 | rcu_read_unlock(); |
1884 | mutex_unlock(lock: &lock); |
1885 | if (IS_ERR(ptr: id_priv) && *net_dev) { |
1886 | dev_put(dev: *net_dev); |
1887 | *net_dev = NULL; |
1888 | } |
1889 | return id_priv; |
1890 | } |
1891 | |
1892 | static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) |
1893 | { |
1894 | return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); |
1895 | } |
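
/*
 * Layout note (sketch): for the IP-based port spaces the CM private data
 * begins with a struct cma_hdr (the IP version and addresses consumed by
 * cma_match_private_data() above), so user data starts at this offset;
 * AF_IB carries no such header. A hypothetical accessor would be:
 *
 *	const void *user_data = ib_event->private_data +
 *				cma_user_data_offset(id_priv);
 */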
1896 | |
1897 | static void cma_cancel_route(struct rdma_id_private *id_priv) |
1898 | { |
1899 | if (rdma_cap_ib_sa(device: id_priv->id.device, port_num: id_priv->id.port_num)) { |
1900 | if (id_priv->query) |
1901 | ib_sa_cancel_query(id: id_priv->query_id, query: id_priv->query); |
1902 | } |
1903 | } |
1904 | |
1905 | static void _cma_cancel_listens(struct rdma_id_private *id_priv) |
1906 | { |
1907 | struct rdma_id_private *dev_id_priv; |
1908 | |
1909 | lockdep_assert_held(&lock); |
1910 | |
1911 | /* |
1912 | * Remove from listen_any_list to prevent added devices from spawning |
1913 | * additional listen requests. |
1914 | */ |
1915 | list_del_init(entry: &id_priv->listen_any_item); |
1916 | |
1917 | while (!list_empty(head: &id_priv->listen_list)) { |
1918 | dev_id_priv = |
1919 | list_first_entry(&id_priv->listen_list, |
1920 | struct rdma_id_private, listen_item); |
1921 | /* sync with device removal to avoid duplicate destruction */ |
1922 | list_del_init(entry: &dev_id_priv->device_item); |
1923 | list_del_init(entry: &dev_id_priv->listen_item); |
1924 | mutex_unlock(lock: &lock); |
1925 | |
1926 | rdma_destroy_id(id: &dev_id_priv->id); |
1927 | mutex_lock(&lock); |
1928 | } |
1929 | } |
1930 | |
1931 | static void cma_cancel_listens(struct rdma_id_private *id_priv) |
1932 | { |
1933 | mutex_lock(&lock); |
1934 | _cma_cancel_listens(id_priv); |
1935 | mutex_unlock(lock: &lock); |
1936 | } |
1937 | |
1938 | static void cma_cancel_operation(struct rdma_id_private *id_priv, |
1939 | enum rdma_cm_state state) |
1940 | { |
1941 | switch (state) { |
1942 | case RDMA_CM_ADDR_QUERY: |
1943 | /* |
1944 | * We can avoid doing the rdma_addr_cancel() based on state, |
1945 | * only RDMA_CM_ADDR_QUERY has a work that could still execute. |
1946 | * Notice that the addr_handler work could still be exiting |
1947 | * outside this state, however due to the interaction with the |
1948 | * handler_mutex the work is guaranteed not to touch id_priv |
1949 | * during exit. |
1950 | */ |
1951 | rdma_addr_cancel(addr: &id_priv->id.route.addr.dev_addr); |
1952 | break; |
1953 | case RDMA_CM_ROUTE_QUERY: |
1954 | cma_cancel_route(id_priv); |
1955 | break; |
1956 | case RDMA_CM_LISTEN: |
1957 | if (cma_any_addr(addr: cma_src_addr(id_priv)) && !id_priv->cma_dev) |
1958 | cma_cancel_listens(id_priv); |
1959 | break; |
1960 | default: |
1961 | break; |
1962 | } |
1963 | } |
1964 | |
1965 | static void cma_release_port(struct rdma_id_private *id_priv) |
1966 | { |
1967 | struct rdma_bind_list *bind_list = id_priv->bind_list; |
1968 | struct net *net = id_priv->id.route.addr.dev_addr.net; |
1969 | |
1970 | if (!bind_list) |
1971 | return; |
1972 | |
1973 | mutex_lock(&lock); |
1974 | hlist_del(n: &id_priv->node); |
1975 | if (hlist_empty(h: &bind_list->owners)) { |
1976 | cma_ps_remove(net, ps: bind_list->ps, snum: bind_list->port); |
1977 | kfree(objp: bind_list); |
1978 | } |
1979 | mutex_unlock(lock: &lock); |
1980 | } |
1981 | |
1982 | static void destroy_mc(struct rdma_id_private *id_priv, |
1983 | struct cma_multicast *mc) |
1984 | { |
1985 | bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); |
1986 | |
1987 | if (rdma_cap_ib_mcast(device: id_priv->id.device, port_num: id_priv->id.port_num)) |
1988 | ib_sa_free_multicast(multicast: mc->sa_mc); |
1989 | |
1990 | if (rdma_protocol_roce(device: id_priv->id.device, port_num: id_priv->id.port_num)) { |
1991 | struct rdma_dev_addr *dev_addr = |
1992 | &id_priv->id.route.addr.dev_addr; |
1993 | struct net_device *ndev = NULL; |
1994 | |
1995 | if (dev_addr->bound_dev_if) |
1996 | ndev = dev_get_by_index(net: dev_addr->net, |
1997 | ifindex: dev_addr->bound_dev_if); |
1998 | if (ndev && !send_only) { |
1999 | enum ib_gid_type gid_type; |
2000 | union ib_gid mgid; |
2001 | |
2002 | gid_type = id_priv->cma_dev->default_gid_type |
2003 | [id_priv->id.port_num - |
2004 | rdma_start_port( |
2005 | device: id_priv->cma_dev->device)]; |
2006 | cma_iboe_set_mgid(addr: (struct sockaddr *)&mc->addr, mgid: &mgid, |
2007 | gid_type); |
2008 | cma_igmp_send(ndev, mgid: &mgid, join: false); |
2009 | } |
2010 | dev_put(dev: ndev); |
2011 | |
2012 | cancel_work_sync(work: &mc->iboe_join.work); |
2013 | } |
2014 | kfree(objp: mc); |
2015 | } |
2016 | |
2017 | static void cma_leave_mc_groups(struct rdma_id_private *id_priv) |
2018 | { |
2019 | struct cma_multicast *mc; |
2020 | |
2021 | while (!list_empty(head: &id_priv->mc_list)) { |
2022 | mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, |
2023 | list); |
2024 | list_del(entry: &mc->list); |
2025 | destroy_mc(id_priv, mc); |
2026 | } |
2027 | } |
2028 | |
2029 | static void _destroy_id(struct rdma_id_private *id_priv, |
2030 | enum rdma_cm_state state) |
2031 | { |
2032 | cma_cancel_operation(id_priv, state); |
2033 | |
2034 | rdma_restrack_del(res: &id_priv->res); |
2035 | cma_remove_id_from_tree(id_priv); |
2036 | if (id_priv->cma_dev) { |
2037 | if (rdma_cap_ib_cm(device: id_priv->id.device, port_num: 1)) { |
2038 | if (id_priv->cm_id.ib) |
2039 | ib_destroy_cm_id(cm_id: id_priv->cm_id.ib); |
2040 | } else if (rdma_cap_iw_cm(device: id_priv->id.device, port_num: 1)) { |
2041 | if (id_priv->cm_id.iw) |
2042 | iw_destroy_cm_id(cm_id: id_priv->cm_id.iw); |
2043 | } |
2044 | cma_leave_mc_groups(id_priv); |
2045 | cma_release_dev(id_priv); |
2046 | } |
2047 | |
2048 | cma_release_port(id_priv); |
2049 | cma_id_put(id_priv); |
2050 | wait_for_completion(&id_priv->comp); |
2051 | |
2052 | if (id_priv->internal_id) |
2053 | cma_id_put(id_priv: id_priv->id.context); |
2054 | |
2055 | kfree(objp: id_priv->id.route.path_rec); |
2056 | kfree(objp: id_priv->id.route.path_rec_inbound); |
2057 | kfree(objp: id_priv->id.route.path_rec_outbound); |
2058 | |
2059 | put_net(net: id_priv->id.route.addr.dev_addr.net); |
2060 | kfree(objp: id_priv); |
2061 | } |
2062 | |
2063 | /* |
2064 | * destroy an ID from within the handler_mutex. This ensures that no other |
2065 | * handlers can start running concurrently. |
2066 | */ |
2067 | static void destroy_id_handler_unlock(struct rdma_id_private *id_priv) |
2068 | 	__releases(&id_priv->handler_mutex)
2069 | { |
2070 | enum rdma_cm_state state; |
2071 | unsigned long flags; |
2072 | |
2073 | trace_cm_id_destroy(id_priv); |
2074 | |
2075 | /* |
2076 | * Setting the state to destroyed under the handler mutex provides a |
2077 | * fence against calling handler callbacks. If this is invoked due to |
2078 | 	 * the failure of a handler callback then it guarantees that no future
2079 | * handlers will be called. |
2080 | */ |
2081 | lockdep_assert_held(&id_priv->handler_mutex); |
2082 | spin_lock_irqsave(&id_priv->lock, flags); |
2083 | state = id_priv->state; |
2084 | id_priv->state = RDMA_CM_DESTROYING; |
2085 | spin_unlock_irqrestore(lock: &id_priv->lock, flags); |
2086 | mutex_unlock(lock: &id_priv->handler_mutex); |
2087 | _destroy_id(id_priv, state); |
2088 | } |
2089 | |
2090 | void rdma_destroy_id(struct rdma_cm_id *id) |
2091 | { |
2092 | struct rdma_id_private *id_priv = |
2093 | container_of(id, struct rdma_id_private, id); |
2094 | |
2095 | mutex_lock(&id_priv->handler_mutex); |
2096 | destroy_id_handler_unlock(id_priv); |
2097 | } |
2098 | EXPORT_SYMBOL(rdma_destroy_id); |
2099 | |
2100 | static int cma_rep_recv(struct rdma_id_private *id_priv) |
2101 | { |
2102 | int ret; |
2103 | |
2104 | ret = cma_modify_qp_rtr(id_priv, NULL); |
2105 | if (ret) |
2106 | goto reject; |
2107 | |
2108 | ret = cma_modify_qp_rts(id_priv, NULL); |
2109 | if (ret) |
2110 | goto reject; |
2111 | |
2112 | trace_cm_send_rtu(id_priv); |
2113 | ret = ib_send_cm_rtu(cm_id: id_priv->cm_id.ib, NULL, private_data_len: 0); |
2114 | if (ret) |
2115 | goto reject; |
2116 | |
2117 | return 0; |
2118 | reject: |
2119 | pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n" , ret); |
2120 | cma_modify_qp_err(id_priv); |
2121 | trace_cm_send_rej(id_priv); |
2122 | ib_send_cm_rej(cm_id: id_priv->cm_id.ib, reason: IB_CM_REJ_CONSUMER_DEFINED, |
2123 | NULL, ari_length: 0, NULL, private_data_len: 0); |
2124 | return ret; |
2125 | } |
2126 | |
2127 | static void cma_set_rep_event_data(struct rdma_cm_event *event, |
2128 | const struct ib_cm_rep_event_param *rep_data, |
2129 | void *private_data) |
2130 | { |
2131 | event->param.conn.private_data = private_data; |
2132 | event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; |
2133 | event->param.conn.responder_resources = rep_data->responder_resources; |
2134 | event->param.conn.initiator_depth = rep_data->initiator_depth; |
2135 | event->param.conn.flow_control = rep_data->flow_control; |
2136 | event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; |
2137 | event->param.conn.srq = rep_data->srq; |
2138 | event->param.conn.qp_num = rep_data->remote_qpn; |
2139 | |
2140 | event->ece.vendor_id = rep_data->ece.vendor_id; |
2141 | event->ece.attr_mod = rep_data->ece.attr_mod; |
2142 | } |
2143 | |
2144 | static int cma_cm_event_handler(struct rdma_id_private *id_priv, |
2145 | struct rdma_cm_event *event) |
2146 | { |
2147 | int ret; |
2148 | |
2149 | lockdep_assert_held(&id_priv->handler_mutex); |
2150 | |
2151 | trace_cm_event_handler(id_priv, event); |
2152 | ret = id_priv->id.event_handler(&id_priv->id, event); |
2153 | trace_cm_event_done(id_priv, event, result: ret); |
2154 | return ret; |
2155 | } |
2156 | |
2157 | static int cma_ib_handler(struct ib_cm_id *cm_id, |
2158 | const struct ib_cm_event *ib_event) |
2159 | { |
2160 | struct rdma_id_private *id_priv = cm_id->context; |
2161 | struct rdma_cm_event event = {}; |
2162 | enum rdma_cm_state state; |
2163 | int ret; |
2164 | |
2165 | mutex_lock(&id_priv->handler_mutex); |
2166 | state = READ_ONCE(id_priv->state); |
2167 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && |
2168 | state != RDMA_CM_CONNECT) || |
2169 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && |
2170 | state != RDMA_CM_DISCONNECT)) |
2171 | goto out; |
2172 | |
2173 | switch (ib_event->event) { |
2174 | case IB_CM_REQ_ERROR: |
2175 | case IB_CM_REP_ERROR: |
2176 | event.event = RDMA_CM_EVENT_UNREACHABLE; |
2177 | event.status = -ETIMEDOUT; |
2178 | break; |
2179 | case IB_CM_REP_RECEIVED: |
2180 | if (state == RDMA_CM_CONNECT && |
2181 | (id_priv->id.qp_type != IB_QPT_UD)) { |
2182 | trace_cm_send_mra(id_priv); |
2183 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, private_data_len: 0); |
2184 | } |
2185 | if (id_priv->id.qp) { |
2186 | event.status = cma_rep_recv(id_priv); |
2187 | event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : |
2188 | RDMA_CM_EVENT_ESTABLISHED; |
2189 | } else { |
2190 | event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; |
2191 | } |
2192 | cma_set_rep_event_data(event: &event, rep_data: &ib_event->param.rep_rcvd, |
2193 | private_data: ib_event->private_data); |
2194 | break; |
2195 | case IB_CM_RTU_RECEIVED: |
2196 | case IB_CM_USER_ESTABLISHED: |
2197 | event.event = RDMA_CM_EVENT_ESTABLISHED; |
2198 | break; |
2199 | case IB_CM_DREQ_ERROR: |
2200 | event.status = -ETIMEDOUT; |
2201 | fallthrough; |
2202 | case IB_CM_DREQ_RECEIVED: |
2203 | case IB_CM_DREP_RECEIVED: |
2204 | if (!cma_comp_exch(id_priv, comp: RDMA_CM_CONNECT, |
2205 | exch: RDMA_CM_DISCONNECT)) |
2206 | goto out; |
2207 | event.event = RDMA_CM_EVENT_DISCONNECTED; |
2208 | break; |
2209 | case IB_CM_TIMEWAIT_EXIT: |
2210 | event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; |
2211 | break; |
2212 | case IB_CM_MRA_RECEIVED: |
2213 | /* ignore event */ |
2214 | goto out; |
2215 | case IB_CM_REJ_RECEIVED: |
2216 | pr_debug_ratelimited("RDMA CM: REJECTED: %s\n" , rdma_reject_msg(&id_priv->id, |
2217 | ib_event->param.rej_rcvd.reason)); |
2218 | cma_modify_qp_err(id_priv); |
2219 | event.status = ib_event->param.rej_rcvd.reason; |
2220 | event.event = RDMA_CM_EVENT_REJECTED; |
2221 | event.param.conn.private_data = ib_event->private_data; |
2222 | event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; |
2223 | break; |
2224 | default: |
2225 | pr_err("RDMA CMA: unexpected IB CM event: %d\n" , |
2226 | ib_event->event); |
2227 | goto out; |
2228 | } |
2229 | |
2230 | ret = cma_cm_event_handler(id_priv, event: &event); |
2231 | if (ret) { |
2232 | /* Destroy the CM ID by returning a non-zero value. */ |
2233 | id_priv->cm_id.ib = NULL; |
2234 | destroy_id_handler_unlock(id_priv); |
2235 | return ret; |
2236 | } |
2237 | out: |
2238 | mutex_unlock(lock: &id_priv->handler_mutex); |
2239 | return 0; |
2240 | } |
2241 | |
2242 | static struct rdma_id_private * |
2243 | cma_ib_new_conn_id(const struct rdma_cm_id *listen_id, |
2244 | const struct ib_cm_event *ib_event, |
2245 | struct net_device *net_dev) |
2246 | { |
2247 | struct rdma_id_private *listen_id_priv; |
2248 | struct rdma_id_private *id_priv; |
2249 | struct rdma_cm_id *id; |
2250 | struct rdma_route *rt; |
2251 | const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; |
2252 | struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; |
2253 | const __be64 service_id = |
2254 | ib_event->param.req_rcvd.primary_path->service_id; |
2255 | int ret; |
2256 | |
2257 | listen_id_priv = container_of(listen_id, struct rdma_id_private, id); |
2258 | id_priv = __rdma_create_id(net: listen_id->route.addr.dev_addr.net, |
2259 | event_handler: listen_id->event_handler, context: listen_id->context, |
2260 | ps: listen_id->ps, |
2261 | qp_type: ib_event->param.req_rcvd.qp_type, |
2262 | parent: listen_id_priv); |
2263 | if (IS_ERR(ptr: id_priv)) |
2264 | return NULL; |
2265 | |
2266 | id = &id_priv->id; |
2267 | if (cma_save_net_info(src_addr: (struct sockaddr *)&id->route.addr.src_addr, |
2268 | dst_addr: (struct sockaddr *)&id->route.addr.dst_addr, |
2269 | listen_id, ib_event, sa_family: ss_family, service_id)) |
2270 | goto err; |
2271 | |
2272 | rt = &id->route; |
2273 | rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; |
2274 | rt->path_rec = kmalloc_array(n: rt->num_pri_alt_paths, |
2275 | size: sizeof(*rt->path_rec), GFP_KERNEL); |
2276 | if (!rt->path_rec) |
2277 | goto err; |
2278 | |
2279 | rt->path_rec[0] = *path; |
2280 | if (rt->num_pri_alt_paths == 2) |
2281 | rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; |
2282 | |
2283 | if (net_dev) { |
2284 | rdma_copy_src_l2_addr(dev_addr: &rt->addr.dev_addr, dev: net_dev); |
2285 | } else { |
2286 | if (!cma_protocol_roce(id: listen_id) && |
2287 | cma_any_addr(addr: cma_src_addr(id_priv))) { |
2288 | rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; |
2289 | rdma_addr_set_sgid(dev_addr: &rt->addr.dev_addr, gid: &rt->path_rec[0].sgid); |
2290 | ib_addr_set_pkey(dev_addr: &rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); |
2291 | } else if (!cma_any_addr(addr: cma_src_addr(id_priv))) { |
2292 | ret = cma_translate_addr(addr: cma_src_addr(id_priv), dev_addr: &rt->addr.dev_addr); |
2293 | if (ret) |
2294 | goto err; |
2295 | } |
2296 | } |
2297 | rdma_addr_set_dgid(dev_addr: &rt->addr.dev_addr, gid: &rt->path_rec[0].dgid); |
2298 | |
2299 | id_priv->state = RDMA_CM_CONNECT; |
2300 | return id_priv; |
2301 | |
2302 | err: |
2303 | rdma_destroy_id(id); |
2304 | return NULL; |
2305 | } |
2306 | |
2307 | static struct rdma_id_private * |
2308 | cma_ib_new_udp_id(const struct rdma_cm_id *listen_id, |
2309 | const struct ib_cm_event *ib_event, |
2310 | struct net_device *net_dev) |
2311 | { |
2312 | const struct rdma_id_private *listen_id_priv; |
2313 | struct rdma_id_private *id_priv; |
2314 | struct rdma_cm_id *id; |
2315 | const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; |
2316 | struct net *net = listen_id->route.addr.dev_addr.net; |
2317 | int ret; |
2318 | |
2319 | listen_id_priv = container_of(listen_id, struct rdma_id_private, id); |
2320 | id_priv = __rdma_create_id(net, event_handler: listen_id->event_handler, |
2321 | context: listen_id->context, ps: listen_id->ps, qp_type: IB_QPT_UD, |
2322 | parent: listen_id_priv); |
2323 | if (IS_ERR(ptr: id_priv)) |
2324 | return NULL; |
2325 | |
2326 | id = &id_priv->id; |
2327 | if (cma_save_net_info(src_addr: (struct sockaddr *)&id->route.addr.src_addr, |
2328 | dst_addr: (struct sockaddr *)&id->route.addr.dst_addr, |
2329 | listen_id, ib_event, sa_family: ss_family, |
2330 | service_id: ib_event->param.sidr_req_rcvd.service_id)) |
2331 | goto err; |
2332 | |
2333 | if (net_dev) { |
2334 | rdma_copy_src_l2_addr(dev_addr: &id->route.addr.dev_addr, dev: net_dev); |
2335 | } else { |
2336 | if (!cma_any_addr(addr: cma_src_addr(id_priv))) { |
2337 | ret = cma_translate_addr(addr: cma_src_addr(id_priv), |
2338 | dev_addr: &id->route.addr.dev_addr); |
2339 | if (ret) |
2340 | goto err; |
2341 | } |
2342 | } |
2343 | |
2344 | id_priv->state = RDMA_CM_CONNECT; |
2345 | return id_priv; |
2346 | err: |
2347 | rdma_destroy_id(id); |
2348 | return NULL; |
2349 | } |
2350 | |
2351 | static void cma_set_req_event_data(struct rdma_cm_event *event, |
2352 | const struct ib_cm_req_event_param *req_data, |
2353 | void *private_data, int offset) |
2354 | { |
2355 | event->param.conn.private_data = private_data + offset; |
2356 | event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; |
2357 | event->param.conn.responder_resources = req_data->responder_resources; |
2358 | event->param.conn.initiator_depth = req_data->initiator_depth; |
2359 | event->param.conn.flow_control = req_data->flow_control; |
2360 | event->param.conn.retry_count = req_data->retry_count; |
2361 | event->param.conn.rnr_retry_count = req_data->rnr_retry_count; |
2362 | event->param.conn.srq = req_data->srq; |
2363 | event->param.conn.qp_num = req_data->remote_qpn; |
2364 | |
2365 | event->ece.vendor_id = req_data->ece.vendor_id; |
2366 | event->ece.attr_mod = req_data->ece.attr_mod; |
2367 | } |
2368 | |
2369 | static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id, |
2370 | const struct ib_cm_event *ib_event) |
2371 | { |
2372 | return (((ib_event->event == IB_CM_REQ_RECEIVED) && |
2373 | (ib_event->param.req_rcvd.qp_type == id->qp_type)) || |
2374 | ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && |
2375 | (id->qp_type == IB_QPT_UD)) || |
2376 | (!id->qp_type)); |
2377 | } |
2378 | |
2379 | static int cma_ib_req_handler(struct ib_cm_id *cm_id, |
2380 | const struct ib_cm_event *ib_event) |
2381 | { |
2382 | struct rdma_id_private *listen_id, *conn_id = NULL; |
2383 | struct rdma_cm_event event = {}; |
2384 | struct cma_req_info req = {}; |
2385 | struct net_device *net_dev; |
2386 | u8 offset; |
2387 | int ret; |
2388 | |
2389 | listen_id = cma_ib_id_from_event(cm_id, ib_event, req: &req, net_dev: &net_dev); |
2390 | if (IS_ERR(ptr: listen_id)) |
2391 | return PTR_ERR(ptr: listen_id); |
2392 | |
2393 | trace_cm_req_handler(id_priv: listen_id, event: ib_event->event); |
2394 | if (!cma_ib_check_req_qp_type(id: &listen_id->id, ib_event)) { |
2395 | ret = -EINVAL; |
2396 | goto net_dev_put; |
2397 | } |
2398 | |
2399 | mutex_lock(&listen_id->handler_mutex); |
2400 | if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) { |
2401 | ret = -ECONNABORTED; |
2402 | goto err_unlock; |
2403 | } |
2404 | |
2405 | offset = cma_user_data_offset(id_priv: listen_id); |
2406 | event.event = RDMA_CM_EVENT_CONNECT_REQUEST; |
2407 | if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { |
2408 | conn_id = cma_ib_new_udp_id(listen_id: &listen_id->id, ib_event, net_dev); |
2409 | event.param.ud.private_data = ib_event->private_data + offset; |
2410 | event.param.ud.private_data_len = |
2411 | IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; |
2412 | } else { |
2413 | conn_id = cma_ib_new_conn_id(listen_id: &listen_id->id, ib_event, net_dev); |
2414 | cma_set_req_event_data(event: &event, req_data: &ib_event->param.req_rcvd, |
2415 | private_data: ib_event->private_data, offset); |
2416 | } |
2417 | if (!conn_id) { |
2418 | ret = -ENOMEM; |
2419 | goto err_unlock; |
2420 | } |
2421 | |
2422 | mutex_lock_nested(lock: &conn_id->handler_mutex, SINGLE_DEPTH_NESTING); |
2423 | ret = cma_ib_acquire_dev(id_priv: conn_id, listen_id_priv: listen_id, req: &req); |
2424 | if (ret) { |
2425 | destroy_id_handler_unlock(id_priv: conn_id); |
2426 | goto err_unlock; |
2427 | } |
2428 | |
2429 | conn_id->cm_id.ib = cm_id; |
2430 | cm_id->context = conn_id; |
2431 | cm_id->cm_handler = cma_ib_handler; |
2432 | |
2433 | ret = cma_cm_event_handler(id_priv: conn_id, event: &event); |
2434 | if (ret) { |
2435 | /* Destroy the CM ID by returning a non-zero value. */ |
2436 | conn_id->cm_id.ib = NULL; |
2437 | mutex_unlock(lock: &listen_id->handler_mutex); |
2438 | destroy_id_handler_unlock(id_priv: conn_id); |
2439 | goto net_dev_put; |
2440 | } |
2441 | |
2442 | if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && |
2443 | conn_id->id.qp_type != IB_QPT_UD) { |
2444 | trace_cm_send_mra(id_priv: cm_id->context); |
2445 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, private_data_len: 0); |
2446 | } |
2447 | mutex_unlock(lock: &conn_id->handler_mutex); |
2448 | |
2449 | err_unlock: |
2450 | mutex_unlock(lock: &listen_id->handler_mutex); |
2451 | |
2452 | net_dev_put: |
2453 | dev_put(dev: net_dev); |
2454 | |
2455 | return ret; |
2456 | } |
2457 | |
2458 | __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) |
2459 | { |
2460 | if (addr->sa_family == AF_IB) |
2461 | return ((struct sockaddr_ib *) addr)->sib_sid; |
2462 | |
2463 | return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); |
2464 | } |
2465 | EXPORT_SYMBOL(rdma_get_service_id); |
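
/*
 * Illustrative sketch (not used by the driver): for the IP-based port
 * spaces the service ID packs the port space into bits 16-31 and the port
 * number into bits 0-15, so rdma_ps_from_service_id() above simply
 * inverts the encoding performed by rdma_get_service_id(). The helper
 * below is hypothetical and only demonstrates the round trip for a
 * non-AF_IB address.
 */
static inline bool cma_service_id_roundtrip(struct rdma_cm_id *id,
					    struct sockaddr *addr)
{
	__be64 sid = rdma_get_service_id(id, addr);

	return rdma_ps_from_service_id(sid) == id->ps;
}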
2466 | |
2467 | void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid, |
2468 | union ib_gid *dgid) |
2469 | { |
2470 | struct rdma_addr *addr = &cm_id->route.addr; |
2471 | |
2472 | if (!cm_id->device) { |
2473 | if (sgid) |
2474 | memset(sgid, 0, sizeof(*sgid)); |
2475 | if (dgid) |
2476 | memset(dgid, 0, sizeof(*dgid)); |
2477 | return; |
2478 | } |
2479 | |
2480 | if (rdma_protocol_roce(device: cm_id->device, port_num: cm_id->port_num)) { |
2481 | if (sgid) |
2482 | rdma_ip2gid(addr: (struct sockaddr *)&addr->src_addr, gid: sgid); |
2483 | if (dgid) |
2484 | rdma_ip2gid(addr: (struct sockaddr *)&addr->dst_addr, gid: dgid); |
2485 | } else { |
2486 | if (sgid) |
2487 | rdma_addr_get_sgid(dev_addr: &addr->dev_addr, gid: sgid); |
2488 | if (dgid) |
2489 | rdma_addr_get_dgid(dev_addr: &addr->dev_addr, gid: dgid); |
2490 | } |
2491 | } |
2492 | EXPORT_SYMBOL(rdma_read_gids); |
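
/*
 * Usage sketch (hypothetical caller, not part of this file): once the
 * cm_id is bound to a device, both GIDs can be read regardless of whether
 * they are IP-derived (RoCE) or taken from the resolved device address.
 */
static inline void cma_example_log_gids(struct rdma_cm_id *cm_id)
{
	union ib_gid sgid, dgid;

	rdma_read_gids(cm_id, &sgid, &dgid);
	pr_debug("sgid %pI6 dgid %pI6\n", sgid.raw, dgid.raw);
}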
2493 | |
2494 | static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) |
2495 | { |
2496 | struct rdma_id_private *id_priv = iw_id->context; |
2497 | struct rdma_cm_event event = {}; |
2498 | int ret = 0; |
2499 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; |
2500 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; |
2501 | |
2502 | mutex_lock(&id_priv->handler_mutex); |
2503 | if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) |
2504 | goto out; |
2505 | |
2506 | switch (iw_event->event) { |
2507 | case IW_CM_EVENT_CLOSE: |
2508 | event.event = RDMA_CM_EVENT_DISCONNECTED; |
2509 | break; |
2510 | case IW_CM_EVENT_CONNECT_REPLY: |
2511 | memcpy(cma_src_addr(id_priv), laddr, |
2512 | rdma_addr_size(laddr)); |
2513 | memcpy(cma_dst_addr(id_priv), raddr, |
2514 | rdma_addr_size(raddr)); |
2515 | switch (iw_event->status) { |
2516 | case 0: |
2517 | event.event = RDMA_CM_EVENT_ESTABLISHED; |
2518 | event.param.conn.initiator_depth = iw_event->ird; |
2519 | event.param.conn.responder_resources = iw_event->ord; |
2520 | break; |
2521 | case -ECONNRESET: |
2522 | case -ECONNREFUSED: |
2523 | event.event = RDMA_CM_EVENT_REJECTED; |
2524 | break; |
2525 | case -ETIMEDOUT: |
2526 | event.event = RDMA_CM_EVENT_UNREACHABLE; |
2527 | break; |
2528 | default: |
2529 | event.event = RDMA_CM_EVENT_CONNECT_ERROR; |
2530 | break; |
2531 | } |
2532 | break; |
2533 | case IW_CM_EVENT_ESTABLISHED: |
2534 | event.event = RDMA_CM_EVENT_ESTABLISHED; |
2535 | event.param.conn.initiator_depth = iw_event->ird; |
2536 | event.param.conn.responder_resources = iw_event->ord; |
2537 | break; |
2538 | default: |
2539 | goto out; |
2540 | } |
2541 | |
2542 | event.status = iw_event->status; |
2543 | event.param.conn.private_data = iw_event->private_data; |
2544 | event.param.conn.private_data_len = iw_event->private_data_len; |
2545 | ret = cma_cm_event_handler(id_priv, event: &event); |
2546 | if (ret) { |
2547 | /* Destroy the CM ID by returning a non-zero value. */ |
2548 | id_priv->cm_id.iw = NULL; |
2549 | destroy_id_handler_unlock(id_priv); |
2550 | return ret; |
2551 | } |
2552 | |
2553 | out: |
2554 | mutex_unlock(lock: &id_priv->handler_mutex); |
2555 | return ret; |
2556 | } |
2557 | |
2558 | static int iw_conn_req_handler(struct iw_cm_id *cm_id, |
2559 | struct iw_cm_event *iw_event) |
2560 | { |
2561 | struct rdma_id_private *listen_id, *conn_id; |
2562 | struct rdma_cm_event event = {}; |
2563 | int ret = -ECONNABORTED; |
2564 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; |
2565 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; |
2566 | |
2567 | event.event = RDMA_CM_EVENT_CONNECT_REQUEST; |
2568 | event.param.conn.private_data = iw_event->private_data; |
2569 | event.param.conn.private_data_len = iw_event->private_data_len; |
2570 | event.param.conn.initiator_depth = iw_event->ird; |
2571 | event.param.conn.responder_resources = iw_event->ord; |
2572 | |
2573 | listen_id = cm_id->context; |
2574 | |
2575 | mutex_lock(&listen_id->handler_mutex); |
2576 | if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) |
2577 | goto out; |
2578 | |
2579 | /* Create a new RDMA id for the new IW CM ID */ |
2580 | conn_id = __rdma_create_id(net: listen_id->id.route.addr.dev_addr.net, |
2581 | event_handler: listen_id->id.event_handler, |
2582 | context: listen_id->id.context, ps: RDMA_PS_TCP, |
2583 | qp_type: IB_QPT_RC, parent: listen_id); |
2584 | if (IS_ERR(ptr: conn_id)) { |
2585 | ret = -ENOMEM; |
2586 | goto out; |
2587 | } |
2588 | mutex_lock_nested(lock: &conn_id->handler_mutex, SINGLE_DEPTH_NESTING); |
2589 | conn_id->state = RDMA_CM_CONNECT; |
2590 | |
2591 | ret = rdma_translate_ip(addr: laddr, dev_addr: &conn_id->id.route.addr.dev_addr); |
2592 | if (ret) { |
2593 | mutex_unlock(lock: &listen_id->handler_mutex); |
2594 | destroy_id_handler_unlock(id_priv: conn_id); |
2595 | return ret; |
2596 | } |
2597 | |
2598 | ret = cma_iw_acquire_dev(id_priv: conn_id, listen_id_priv: listen_id); |
2599 | if (ret) { |
2600 | mutex_unlock(lock: &listen_id->handler_mutex); |
2601 | destroy_id_handler_unlock(id_priv: conn_id); |
2602 | return ret; |
2603 | } |
2604 | |
2605 | conn_id->cm_id.iw = cm_id; |
2606 | cm_id->context = conn_id; |
2607 | cm_id->cm_handler = cma_iw_handler; |
2608 | |
2609 | memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); |
2610 | memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); |
2611 | |
2612 | ret = cma_cm_event_handler(id_priv: conn_id, event: &event); |
2613 | if (ret) { |
2614 | /* User wants to destroy the CM ID */ |
2615 | conn_id->cm_id.iw = NULL; |
2616 | mutex_unlock(lock: &listen_id->handler_mutex); |
2617 | destroy_id_handler_unlock(id_priv: conn_id); |
2618 | return ret; |
2619 | } |
2620 | |
2621 | mutex_unlock(lock: &conn_id->handler_mutex); |
2622 | |
2623 | out: |
2624 | mutex_unlock(lock: &listen_id->handler_mutex); |
2625 | return ret; |
2626 | } |
2627 | |
2628 | static int cma_ib_listen(struct rdma_id_private *id_priv) |
2629 | { |
2630 | struct sockaddr *addr; |
2631 | struct ib_cm_id *id; |
2632 | __be64 svc_id; |
2633 | |
2634 | addr = cma_src_addr(id_priv); |
2635 | svc_id = rdma_get_service_id(&id_priv->id, addr); |
2636 | id = ib_cm_insert_listen(device: id_priv->id.device, |
2637 | cm_handler: cma_ib_req_handler, service_id: svc_id); |
2638 | if (IS_ERR(ptr: id)) |
2639 | return PTR_ERR(ptr: id); |
2640 | id_priv->cm_id.ib = id; |
2641 | |
2642 | return 0; |
2643 | } |
2644 | |
2645 | static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) |
2646 | { |
2647 | int ret; |
2648 | struct iw_cm_id *id; |
2649 | |
2650 | id = iw_create_cm_id(device: id_priv->id.device, |
2651 | cm_handler: iw_conn_req_handler, |
2652 | context: id_priv); |
2653 | if (IS_ERR(ptr: id)) |
2654 | return PTR_ERR(ptr: id); |
2655 | |
2656 | mutex_lock(&id_priv->qp_mutex); |
2657 | id->tos = id_priv->tos; |
2658 | id->tos_set = id_priv->tos_set; |
2659 | mutex_unlock(lock: &id_priv->qp_mutex); |
2660 | id->afonly = id_priv->afonly; |
2661 | id_priv->cm_id.iw = id; |
2662 | |
2663 | memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), |
2664 | rdma_addr_size(cma_src_addr(id_priv))); |
2665 | |
2666 | ret = iw_cm_listen(cm_id: id_priv->cm_id.iw, backlog); |
2667 | |
2668 | if (ret) { |
2669 | iw_destroy_cm_id(cm_id: id_priv->cm_id.iw); |
2670 | id_priv->cm_id.iw = NULL; |
2671 | } |
2672 | |
2673 | return ret; |
2674 | } |
2675 | |
2676 | static int cma_listen_handler(struct rdma_cm_id *id, |
2677 | struct rdma_cm_event *event) |
2678 | { |
2679 | struct rdma_id_private *id_priv = id->context; |
2680 | |
2681 | /* Listening IDs are always destroyed on removal */ |
2682 | if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) |
2683 | return -1; |
2684 | |
2685 | id->context = id_priv->id.context; |
2686 | id->event_handler = id_priv->id.event_handler; |
2687 | trace_cm_event_handler(id_priv, event); |
2688 | return id_priv->id.event_handler(id, event); |
2689 | } |
2690 | |
2691 | static int cma_listen_on_dev(struct rdma_id_private *id_priv, |
2692 | struct cma_device *cma_dev, |
2693 | struct rdma_id_private **to_destroy) |
2694 | { |
2695 | struct rdma_id_private *dev_id_priv; |
2696 | struct net *net = id_priv->id.route.addr.dev_addr.net; |
2697 | int ret; |
2698 | |
2699 | lockdep_assert_held(&lock); |
2700 | |
2701 | *to_destroy = NULL; |
2702 | if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(device: cma_dev->device, port_num: 1)) |
2703 | return 0; |
2704 | |
2705 | dev_id_priv = |
2706 | __rdma_create_id(net, event_handler: cma_listen_handler, context: id_priv, |
2707 | ps: id_priv->id.ps, qp_type: id_priv->id.qp_type, parent: id_priv); |
2708 | if (IS_ERR(ptr: dev_id_priv)) |
2709 | return PTR_ERR(ptr: dev_id_priv); |
2710 | |
2711 | dev_id_priv->state = RDMA_CM_ADDR_BOUND; |
2712 | memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), |
2713 | rdma_addr_size(cma_src_addr(id_priv))); |
2714 | |
2715 | _cma_attach_to_dev(id_priv: dev_id_priv, cma_dev); |
2716 | rdma_restrack_add(res: &dev_id_priv->res); |
2717 | cma_id_get(id_priv); |
2718 | dev_id_priv->internal_id = 1; |
2719 | dev_id_priv->afonly = id_priv->afonly; |
2720 | mutex_lock(&id_priv->qp_mutex); |
2721 | dev_id_priv->tos_set = id_priv->tos_set; |
2722 | dev_id_priv->tos = id_priv->tos; |
2723 | mutex_unlock(lock: &id_priv->qp_mutex); |
2724 | |
2725 | ret = rdma_listen(id: &dev_id_priv->id, backlog: id_priv->backlog); |
2726 | if (ret) |
2727 | goto err_listen; |
2728 | list_add_tail(new: &dev_id_priv->listen_item, head: &id_priv->listen_list); |
2729 | return 0; |
2730 | err_listen: |
2731 | /* Caller must destroy this after releasing lock */ |
2732 | *to_destroy = dev_id_priv; |
2733 | dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n" , __func__, ret); |
2734 | return ret; |
2735 | } |
2736 | |
2737 | static int cma_listen_on_all(struct rdma_id_private *id_priv) |
2738 | { |
2739 | struct rdma_id_private *to_destroy; |
2740 | struct cma_device *cma_dev; |
2741 | int ret; |
2742 | |
2743 | mutex_lock(&lock); |
2744 | list_add_tail(new: &id_priv->listen_any_item, head: &listen_any_list); |
2745 | list_for_each_entry(cma_dev, &dev_list, list) { |
2746 | ret = cma_listen_on_dev(id_priv, cma_dev, to_destroy: &to_destroy); |
2747 | if (ret) { |
2748 | /* Prevent racing with cma_process_remove() */ |
2749 | if (to_destroy) |
2750 | list_del_init(entry: &to_destroy->device_item); |
2751 | goto err_listen; |
2752 | } |
2753 | } |
2754 | mutex_unlock(lock: &lock); |
2755 | return 0; |
2756 | |
2757 | err_listen: |
2758 | _cma_cancel_listens(id_priv); |
2759 | mutex_unlock(lock: &lock); |
2760 | if (to_destroy) |
2761 | rdma_destroy_id(&to_destroy->id); |
2762 | return ret; |
2763 | } |
2764 | |
2765 | void rdma_set_service_type(struct rdma_cm_id *id, int tos) |
2766 | { |
2767 | struct rdma_id_private *id_priv; |
2768 | |
2769 | id_priv = container_of(id, struct rdma_id_private, id); |
2770 | mutex_lock(&id_priv->qp_mutex); |
2771 | id_priv->tos = (u8) tos; |
2772 | id_priv->tos_set = true; |
2773 | mutex_unlock(lock: &id_priv->qp_mutex); |
2774 | } |
2775 | EXPORT_SYMBOL(rdma_set_service_type); |
2776 | |
2777 | /** |
2778 |  * rdma_set_ack_timeout() - Set the ack timeout of the QP associated
2779 |  * with a connection identifier.
2780 |  * @id: Communication identifier associated with the service type.
2781 |  * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
2782 |  *
2783 |  * This function should be called before rdma_connect() on the active side,
2784 |  * and on the passive side before rdma_accept(). It is applicable to the
2785 |  * primary path only. The timeout affects only the local side of the QP; it
2786 |  * is not negotiated with the remote side, and zero disables the timer. If
2787 |  * it is set before rdma_resolve_route(), the value is also used to determine
2788 |  * the PacketLifeTime for RoCE.
2789 | * |
2790 | * Return: 0 for success |
2791 | */ |
2792 | int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) |
2793 | { |
2794 | struct rdma_id_private *id_priv; |
2795 | |
2796 | if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) |
2797 | return -EINVAL; |
2798 | |
2799 | id_priv = container_of(id, struct rdma_id_private, id); |
2800 | mutex_lock(&id_priv->qp_mutex); |
2801 | id_priv->timeout = timeout; |
2802 | id_priv->timeout_set = true; |
2803 | mutex_unlock(lock: &id_priv->qp_mutex); |
2804 | |
2805 | return 0; |
2806 | } |
2807 | EXPORT_SYMBOL(rdma_set_ack_timeout); |
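
/*
 * Worked example (sketch): the timeout argument is the IBTA Local ACK
 * Timeout exponent, i.e. 4.096 us * 2^timeout. The hypothetical helper
 * below converts the exponent to microseconds, treating 0 as "timer
 * disabled"; e.g. timeout == 14 gives roughly 67109 us (~67 ms).
 */
static inline u64 cma_ack_timeout_to_usec(u8 timeout)
{
	/* 4.096 us == 4096 ns, so shift first, then convert ns to us */
	return timeout ? (4096ULL << timeout) / 1000 : 0;
}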
2808 | |
2809 | /** |
2810 | * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the |
2811 | * QP associated with a connection identifier. |
2812 |  * @id: Communication identifier associated with the service type.
2813 | * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK |
2814 | * Timer Field" in the IBTA specification. |
2815 | * |
2816 |  * This function should be called before rdma_connect() on the active
2817 |  * side, and on the passive side before rdma_accept(). The timer value
2818 |  * will be associated with the local QP. When the QP receives a send it
2819 |  * is not ready to handle, typically because the receive queue is empty,
2820 |  * an RNR Retry NAK is returned to the requester with the min_rnr_timer
2821 |  * encoded. The requester will then wait at least the time specified
2822 | * in the NAK before retrying. The default is zero, which translates |
2823 | * to a minimum RNR Timer value of 655 ms. |
2824 | * |
2825 | * Return: 0 for success |
2826 | */ |
2827 | int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer) |
2828 | { |
2829 | struct rdma_id_private *id_priv; |
2830 | |
2831 | /* It is a five-bit value */ |
2832 | if (min_rnr_timer & 0xe0) |
2833 | return -EINVAL; |
2834 | |
2835 | if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT)) |
2836 | return -EINVAL; |
2837 | |
2838 | id_priv = container_of(id, struct rdma_id_private, id); |
2839 | mutex_lock(&id_priv->qp_mutex); |
2840 | id_priv->min_rnr_timer = min_rnr_timer; |
2841 | id_priv->min_rnr_timer_set = true; |
2842 | mutex_unlock(lock: &id_priv->qp_mutex); |
2843 | |
2844 | return 0; |
2845 | } |
2846 | EXPORT_SYMBOL(rdma_set_min_rnr_timer); |
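
/*
 * Usage sketch (hypothetical caller): on the passive side, configure the
 * RNR NAK timer before accepting the connection. The value 0x10 encodes
 * 2.56 ms in the IBTA table referenced above; the constant is only an
 * example, not a recommendation.
 */
static inline int cma_example_set_rnr(struct rdma_cm_id *id)
{
	return rdma_set_min_rnr_timer(id, 0x10);
}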
2847 | |
2848 | static int route_set_path_rec_inbound(struct cma_work *work, |
2849 | struct sa_path_rec *path_rec) |
2850 | { |
2851 | struct rdma_route *route = &work->id->id.route; |
2852 | |
2853 | if (!route->path_rec_inbound) { |
2854 | route->path_rec_inbound = |
2855 | kzalloc(size: sizeof(*route->path_rec_inbound), GFP_KERNEL); |
2856 | if (!route->path_rec_inbound) |
2857 | return -ENOMEM; |
2858 | } |
2859 | |
2860 | *route->path_rec_inbound = *path_rec; |
2861 | return 0; |
2862 | } |
2863 | |
2864 | static int route_set_path_rec_outbound(struct cma_work *work, |
2865 | struct sa_path_rec *path_rec) |
2866 | { |
2867 | struct rdma_route *route = &work->id->id.route; |
2868 | |
2869 | if (!route->path_rec_outbound) { |
2870 | route->path_rec_outbound = |
2871 | kzalloc(size: sizeof(*route->path_rec_outbound), GFP_KERNEL); |
2872 | if (!route->path_rec_outbound) |
2873 | return -ENOMEM; |
2874 | } |
2875 | |
2876 | *route->path_rec_outbound = *path_rec; |
2877 | return 0; |
2878 | } |
2879 | |
2880 | static void cma_query_handler(int status, struct sa_path_rec *path_rec, |
2881 | unsigned int num_prs, void *context) |
2882 | { |
2883 | struct cma_work *work = context; |
2884 | struct rdma_route *route; |
2885 | int i; |
2886 | |
2887 | route = &work->id->id.route; |
2888 | |
2889 | if (status) |
2890 | goto fail; |
2891 | |
2892 | for (i = 0; i < num_prs; i++) { |
2893 | if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP)) |
2894 | *route->path_rec = path_rec[i]; |
2895 | else if (path_rec[i].flags & IB_PATH_INBOUND) |
2896 | status = route_set_path_rec_inbound(work, path_rec: &path_rec[i]); |
2897 | else if (path_rec[i].flags & IB_PATH_OUTBOUND) |
2898 | status = route_set_path_rec_outbound(work, |
2899 | path_rec: &path_rec[i]); |
2900 | else |
2901 | status = -EINVAL; |
2902 | |
2903 | if (status) |
2904 | goto fail; |
2905 | } |
2906 | |
2907 | route->num_pri_alt_paths = 1; |
2908 | queue_work(wq: cma_wq, work: &work->work); |
2909 | return; |
2910 | |
2911 | fail: |
2912 | work->old_state = RDMA_CM_ROUTE_QUERY; |
2913 | work->new_state = RDMA_CM_ADDR_RESOLVED; |
2914 | work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; |
2915 | work->event.status = status; |
2916 | pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n" , |
2917 | status); |
2918 | queue_work(wq: cma_wq, work: &work->work); |
2919 | } |
2920 | |
2921 | static int cma_query_ib_route(struct rdma_id_private *id_priv, |
2922 | unsigned long timeout_ms, struct cma_work *work) |
2923 | { |
2924 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
2925 | struct sa_path_rec path_rec; |
2926 | ib_sa_comp_mask comp_mask; |
2927 | struct sockaddr_in6 *sin6; |
2928 | struct sockaddr_ib *sib; |
2929 | |
2930 | memset(&path_rec, 0, sizeof path_rec); |
2931 | |
2932 | if (rdma_cap_opa_ah(device: id_priv->id.device, port_num: id_priv->id.port_num)) |
2933 | path_rec.rec_type = SA_PATH_REC_TYPE_OPA; |
2934 | else |
2935 | path_rec.rec_type = SA_PATH_REC_TYPE_IB; |
2936 | rdma_addr_get_sgid(dev_addr, gid: &path_rec.sgid); |
2937 | rdma_addr_get_dgid(dev_addr, gid: &path_rec.dgid); |
2938 | path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); |
2939 | path_rec.numb_path = 1; |
2940 | path_rec.reversible = 1; |
2941 | path_rec.service_id = rdma_get_service_id(&id_priv->id, |
2942 | cma_dst_addr(id_priv)); |
2943 | |
2944 | comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | |
2945 | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | |
2946 | IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; |
2947 | |
2948 | switch (cma_family(id_priv)) { |
2949 | case AF_INET: |
2950 | path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); |
2951 | comp_mask |= IB_SA_PATH_REC_QOS_CLASS; |
2952 | break; |
2953 | case AF_INET6: |
2954 | sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); |
2955 | path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); |
2956 | comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; |
2957 | break; |
2958 | case AF_IB: |
2959 | sib = (struct sockaddr_ib *) cma_src_addr(id_priv); |
2960 | path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); |
2961 | comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; |
2962 | break; |
2963 | } |
2964 | |
2965 | id_priv->query_id = ib_sa_path_rec_get(client: &sa_client, device: id_priv->id.device, |
2966 | port_num: id_priv->id.port_num, rec: &path_rec, |
2967 | comp_mask, timeout_ms, |
2968 | GFP_KERNEL, callback: cma_query_handler, |
2969 | context: work, query: &id_priv->query); |
2970 | |
2971 | return (id_priv->query_id < 0) ? id_priv->query_id : 0; |
2972 | } |
2973 | |
2974 | static void cma_iboe_join_work_handler(struct work_struct *work) |
2975 | { |
2976 | struct cma_multicast *mc = |
2977 | container_of(work, struct cma_multicast, iboe_join.work); |
2978 | struct rdma_cm_event *event = &mc->iboe_join.event; |
2979 | struct rdma_id_private *id_priv = mc->id_priv; |
2980 | int ret; |
2981 | |
2982 | mutex_lock(&id_priv->handler_mutex); |
2983 | if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || |
2984 | READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) |
2985 | goto out_unlock; |
2986 | |
2987 | ret = cma_cm_event_handler(id_priv, event); |
2988 | WARN_ON(ret); |
2989 | |
2990 | out_unlock: |
2991 | mutex_unlock(lock: &id_priv->handler_mutex); |
2992 | if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) |
2993 | rdma_destroy_ah_attr(ah_attr: &event->param.ud.ah_attr); |
2994 | } |
2995 | |
2996 | static void cma_work_handler(struct work_struct *_work) |
2997 | { |
2998 | struct cma_work *work = container_of(_work, struct cma_work, work); |
2999 | struct rdma_id_private *id_priv = work->id; |
3000 | |
3001 | mutex_lock(&id_priv->handler_mutex); |
3002 | if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || |
3003 | READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) |
3004 | goto out_unlock; |
3005 | if (work->old_state != 0 || work->new_state != 0) { |
3006 | if (!cma_comp_exch(id_priv, comp: work->old_state, exch: work->new_state)) |
3007 | goto out_unlock; |
3008 | } |
3009 | |
3010 | if (cma_cm_event_handler(id_priv, event: &work->event)) { |
3011 | cma_id_put(id_priv); |
3012 | destroy_id_handler_unlock(id_priv); |
3013 | goto out_free; |
3014 | } |
3015 | |
3016 | out_unlock: |
3017 | mutex_unlock(lock: &id_priv->handler_mutex); |
3018 | cma_id_put(id_priv); |
3019 | out_free: |
3020 | if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN) |
3021 | rdma_destroy_ah_attr(ah_attr: &work->event.param.ud.ah_attr); |
3022 | kfree(objp: work); |
3023 | } |
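
/*
 * Lifetime note (sketch): each queued cma_work is backed by a reference
 * on the owning ID, taken by the enqueuing path (see
 * enqueue_resolve_addr_work() below) and dropped in cma_work_handler()
 * above once the handler has run; the old_state/new_state exchange
 * discards work items that raced with a state change.
 */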
3024 | |
3025 | static void cma_init_resolve_route_work(struct cma_work *work, |
3026 | struct rdma_id_private *id_priv) |
3027 | { |
3028 | work->id = id_priv; |
3029 | INIT_WORK(&work->work, cma_work_handler); |
3030 | work->old_state = RDMA_CM_ROUTE_QUERY; |
3031 | work->new_state = RDMA_CM_ROUTE_RESOLVED; |
3032 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
3033 | } |
3034 | |
3035 | static void enqueue_resolve_addr_work(struct cma_work *work, |
3036 | struct rdma_id_private *id_priv) |
3037 | { |
3038 | /* Balances with cma_id_put() in cma_work_handler */ |
3039 | cma_id_get(id_priv); |
3040 | |
3041 | work->id = id_priv; |
3042 | INIT_WORK(&work->work, cma_work_handler); |
3043 | work->old_state = RDMA_CM_ADDR_QUERY; |
3044 | work->new_state = RDMA_CM_ADDR_RESOLVED; |
3045 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; |
3046 | |
3047 | queue_work(wq: cma_wq, work: &work->work); |
3048 | } |
3049 | |
3050 | static int cma_resolve_ib_route(struct rdma_id_private *id_priv, |
3051 | unsigned long timeout_ms) |
3052 | { |
3053 | struct rdma_route *route = &id_priv->id.route; |
3054 | struct cma_work *work; |
3055 | int ret; |
3056 | |
3057 | work = kzalloc(size: sizeof *work, GFP_KERNEL); |
3058 | if (!work) |
3059 | return -ENOMEM; |
3060 | |
3061 | cma_init_resolve_route_work(work, id_priv); |
3062 | |
3063 | if (!route->path_rec) |
3064 | route->path_rec = kmalloc(size: sizeof *route->path_rec, GFP_KERNEL); |
3065 | if (!route->path_rec) { |
3066 | ret = -ENOMEM; |
3067 | goto err1; |
3068 | } |
3069 | |
3070 | ret = cma_query_ib_route(id_priv, timeout_ms, work); |
3071 | if (ret) |
3072 | goto err2; |
3073 | |
3074 | return 0; |
3075 | err2: |
3076 | kfree(objp: route->path_rec); |
3077 | route->path_rec = NULL; |
3078 | err1: |
3079 | kfree(objp: work); |
3080 | return ret; |
3081 | } |
3082 | |
3083 | static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, |
3084 | unsigned long supported_gids, |
3085 | enum ib_gid_type default_gid) |
3086 | { |
3087 | if ((network_type == RDMA_NETWORK_IPV4 || |
3088 | network_type == RDMA_NETWORK_IPV6) && |
3089 | test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) |
3090 | return IB_GID_TYPE_ROCE_UDP_ENCAP; |
3091 | |
3092 | return default_gid; |
3093 | } |
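
/*
 * In short: when the IP stack identified the network type as IPv4 or
 * IPv6 (RoCE v2 traffic) and the port supports the UDP-encapsulated GID
 * type, prefer RoCE v2; otherwise fall back to the default GID type.
 */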
3094 | |
3095 | /* |
3096 |  * cma_iboe_set_path_rec_l2_fields() is a helper function that sets the
3097 |  * path record type based on the GID type.
3098 |  * It also sets up the other L2 fields of the path record, including the
3099 |  * destination MAC address and the netdev ifindex.
3100 | * It returns the netdev of the bound interface for this path record entry. |
3101 | */ |
3102 | static struct net_device * |
3103 | cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv) |
3104 | { |
3105 | struct rdma_route *route = &id_priv->id.route; |
3106 | enum ib_gid_type gid_type = IB_GID_TYPE_ROCE; |
3107 | struct rdma_addr *addr = &route->addr; |
3108 | unsigned long supported_gids; |
3109 | struct net_device *ndev; |
3110 | |
3111 | if (!addr->dev_addr.bound_dev_if) |
3112 | return NULL; |
3113 | |
3114 | ndev = dev_get_by_index(net: addr->dev_addr.net, |
3115 | ifindex: addr->dev_addr.bound_dev_if); |
3116 | if (!ndev) |
3117 | return NULL; |
3118 | |
3119 | supported_gids = roce_gid_type_mask_support(ib_dev: id_priv->id.device, |
3120 | port: id_priv->id.port_num); |
3121 | gid_type = cma_route_gid_type(network_type: addr->dev_addr.network, |
3122 | supported_gids, |
3123 | default_gid: id_priv->gid_type); |
3124 | /* Use the hint from IP Stack to select GID Type */ |
3125 | if (gid_type < ib_network_to_gid_type(network_type: addr->dev_addr.network)) |
3126 | gid_type = ib_network_to_gid_type(network_type: addr->dev_addr.network); |
3127 | route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(type: gid_type); |
3128 | |
3129 | route->path_rec->roce.route_resolved = true; |
3130 | sa_path_set_dmac(rec: route->path_rec, dmac: addr->dev_addr.dst_dev_addr); |
3131 | return ndev; |
3132 | } |
3133 | |
3134 | int rdma_set_ib_path(struct rdma_cm_id *id, |
3135 | struct sa_path_rec *path_rec) |
3136 | { |
3137 | struct rdma_id_private *id_priv; |
3138 | struct net_device *ndev; |
3139 | int ret; |
3140 | |
3141 | id_priv = container_of(id, struct rdma_id_private, id); |
3142 | if (!cma_comp_exch(id_priv, comp: RDMA_CM_ADDR_RESOLVED, |
3143 | exch: RDMA_CM_ROUTE_RESOLVED)) |
3144 | return -EINVAL; |
3145 | |
3146 | id->route.path_rec = kmemdup(p: path_rec, size: sizeof(*path_rec), |
3147 | GFP_KERNEL); |
3148 | if (!id->route.path_rec) { |
3149 | ret = -ENOMEM; |
3150 | goto err; |
3151 | } |
3152 | |
3153 | if (rdma_protocol_roce(device: id->device, port_num: id->port_num)) { |
3154 | ndev = cma_iboe_set_path_rec_l2_fields(id_priv); |
3155 | if (!ndev) { |
3156 | ret = -ENODEV; |
3157 | goto err_free; |
3158 | } |
3159 | dev_put(dev: ndev); |
3160 | } |
3161 | |
3162 | id->route.num_pri_alt_paths = 1; |
3163 | return 0; |
3164 | |
3165 | err_free: |
3166 | kfree(objp: id->route.path_rec); |
3167 | id->route.path_rec = NULL; |
3168 | err: |
3169 | cma_comp_exch(id_priv, comp: RDMA_CM_ROUTE_RESOLVED, exch: RDMA_CM_ADDR_RESOLVED); |
3170 | return ret; |
3171 | } |
3172 | EXPORT_SYMBOL(rdma_set_ib_path); |
3173 | |
3174 | static int cma_resolve_iw_route(struct rdma_id_private *id_priv) |
3175 | { |
3176 | struct cma_work *work; |
3177 | |
3178 | work = kzalloc(size: sizeof *work, GFP_KERNEL); |
3179 | if (!work) |
3180 | return -ENOMEM; |
3181 | |
3182 | cma_init_resolve_route_work(work, id_priv); |
3183 | queue_work(wq: cma_wq, work: &work->work); |
3184 | return 0; |
3185 | } |
3186 | |
3187 | static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio) |
3188 | { |
3189 | struct net_device *dev; |
3190 | |
3191 | dev = vlan_dev_real_dev(dev: vlan_ndev); |
3192 | if (dev->num_tc) |
3193 | return netdev_get_prio_tc_map(dev, prio); |
3194 | |
3195 | return (vlan_dev_get_egress_qos_mask(dev: vlan_ndev, skprio: prio) & |
3196 | VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; |
3197 | } |
3198 | |
3199 | struct iboe_prio_tc_map { |
3200 | int input_prio; |
3201 | int output_tc; |
3202 | bool found; |
3203 | }; |
3204 | |
3205 | static int get_lower_vlan_dev_tc(struct net_device *dev, |
3206 | struct netdev_nested_priv *priv) |
3207 | { |
3208 | struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data; |
3209 | |
3210 | if (is_vlan_dev(dev)) |
3211 | map->output_tc = get_vlan_ndev_tc(vlan_ndev: dev, prio: map->input_prio); |
3212 | else if (dev->num_tc) |
3213 | map->output_tc = netdev_get_prio_tc_map(dev, prio: map->input_prio); |
3214 | else |
3215 | map->output_tc = 0; |
3216 | 	/* We are interested only in the first-level VLAN device, so always
3217 | 	 * return 1 to stop iterating over lower-level devices.
3218 | 	 */
3219 | map->found = true; |
3220 | return 1; |
3221 | } |
3222 | |
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	struct iboe_prio_tc_map prio_tc_map = {};
	int prio = rt_tos2priority(tos);
	struct netdev_nested_priv priv;

	/* If this is a VLAN device, get the map directly from the VLAN netdev */
	if (is_vlan_dev(ndev))
		return get_vlan_ndev_tc(ndev, prio);

	prio_tc_map.input_prio = prio;
	priv.data = (void *)&prio_tc_map;
	rcu_read_lock();
	netdev_walk_all_lower_dev_rcu(ndev,
				      get_lower_vlan_dev_tc,
				      &priv);
	rcu_read_unlock();
	/* If a map is found on a lower device, use it; otherwise fall back
	 * to the current netdevice to get the priority-to-TC map.
	 */
	if (prio_tc_map.found)
		return prio_tc_map.output_tc;
	else if (ndev->num_tc)
		return netdev_get_prio_tc_map(ndev, prio);
	else
		return 0;
}

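/*
 * For RoCEv2 the GRH flow label is either taken from the IPv6 flowinfo the
 * ULP bound with, or synthesized from the port pair. A worked example of
 * the fallback hash below, assuming IB_GRH_FLOWLABEL_MASK is the 20-bit
 * 0xfffff:
 *
 *	sport = 49152, dport = 4791 (the RoCEv2 UDP port)
 *	hash  = 49152 * 31 + 4791 = 1528503 = 0x1752b7
 *	fl    = 0x1752b7 & 0xfffff = 0x752b7
 *
 * The label is a locally derived, best-effort entropy source for ECMP
 * spreading, not a value negotiated with the peer.
 */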
static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
{
	struct sockaddr_in6 *addr6;
	u16 dport, sport;
	u32 hash, fl;

	addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
	fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
	if ((cma_family(id_priv) != AF_INET6) || !fl) {
		dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
		sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
		hash = (u32)sport * 31 + dport;
		fl = hash & IB_GRH_FLOWLABEL_MASK;
	}

	return cpu_to_be32(fl);
}

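/*
 * RoCE (IBoE) has no SA to query, so the path record is synthesized locally:
 * L2 fields come from the resolved neighbour entry, SGID/DGID are derived
 * from the IP addresses, the SL from the ToS-to-TC mapping above, and the
 * MTU from the netdevice. The queued work item only delivers the completion
 * event; no request goes out on the wire.
 */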
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev;

	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
					rdma_start_port(id_priv->cma_dev->device)];
	u8 tos;

	mutex_lock(&id_priv->qp_mutex);
	tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
	mutex_unlock(&id_priv->qp_mutex);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_pri_alt_paths = 1;

	ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
		/* TODO: get the hoplimit from the inet/inet6 device */
		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
	else
		route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
	route->path_rec->traffic_class = tos;
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = IB_RATE_PORT_CURRENT;
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	/* In case ACK timeout is set, use this value to calculate
	 * PacketLifeTime. As per IBTA 12.7.34,
	 * local ACK timeout = (2 * PacketLifeTime + Local CA's ACK delay).
	 * Assuming a negligible local ACK delay, we can use
	 * PacketLifeTime = local ACK timeout/2
	 * as a reasonable approximation for RoCE networks.
	 */
	mutex_lock(&id_priv->qp_mutex);
	if (id_priv->timeout_set && id_priv->timeout)
		route->path_rec->packet_life_time = id_priv->timeout - 1;
	else
		route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	mutex_unlock(&id_priv->qp_mutex);

	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	if (rdma_protocol_roce_udp_encap(id_priv->id.device,
					 id_priv->id.port_num))
		route->path_rec->flow_label =
			cma_get_roce_udp_flow_label(id_priv);

	cma_init_resolve_route_work(work, id_priv);
	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
	route->num_pri_alt_paths = 0;
err1:
	kfree(work);
	return ret;
}

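/*
 * rdma_resolve_route() moves the ID from RDMA_CM_ADDR_RESOLVED to
 * RDMA_CM_ROUTE_QUERY and kicks off the transport-specific resolution;
 * completion is reported through RDMA_CM_EVENT_ROUTE_RESOLVED or
 * RDMA_CM_EVENT_ROUTE_ERROR. A minimal caller sketch (error handling
 * elided):
 *
 *	ret = rdma_resolve_route(id, 2000);
 *	// wait for RDMA_CM_EVENT_ROUTE_RESOLVED in the event handler,
 *	// then call rdma_connect_locked() from that handler.
 */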
int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (!timeout_ms)
		return -EINVAL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	cma_id_get(id_priv);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num)) {
		ret = cma_resolve_iboe_route(id_priv);
		if (!ret)
			cma_add_id_to_tree(id_priv);
	} else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_id_put(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	union ib_gid gid;
	enum ib_port_state port_state;
	unsigned int p;
	u16 pkey;
	int ret;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		rdma_for_each_port (cur_dev->device, p) {
			if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
			    port_state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	rdma_restrack_add(&id_priv->res);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}

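/*
 * Completion callback for rdma_resolve_ip(). It runs from the address
 * resolution workqueue with status 0 on success, transitions the ID to
 * RDMA_CM_ADDR_RESOLVED, and reports RDMA_CM_EVENT_ADDR_RESOLVED (or
 * RDMA_CM_EVENT_ADDR_ERROR) to the ULP.
 */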
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event = {};
	struct sockaddr *addr;
	struct sockaddr_storage old_addr;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	/*
	 * Store the previous src address, so that if we fail to acquire a
	 * matching rdma device, the old address can be restored, which helps
	 * to cancel the cma listen operation correctly.
	 */
	addr = cma_src_addr(id_priv);
	memcpy(&old_addr, addr, rdma_addr_size(addr));
	memcpy(addr, src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev) {
		status = cma_acquire_dev_by_src_ip(id_priv);
		if (status)
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
					     status);
		rdma_restrack_add(&id_priv->res);
	} else if (status) {
		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
	}

	if (status) {
		memcpy(addr, &old_addr,
		       rdma_addr_size((struct sockaddr *)&old_addr));
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (cma_cm_event_handler(id_priv, &event)) {
		destroy_id_handler_unlock(id_priv);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	enqueue_resolve_addr_work(work, id_priv);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	enqueue_resolve_addr_work(work, id_priv);
	return 0;
err:
	kfree(work);
	return ret;
}

int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
	    id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);

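/*
 * Both option setters must be applied before the ID starts listening; the
 * usual pattern is a sketch like the following (error handling elided):
 *
 *	rdma_set_reuseaddr(id, 1);
 *	rdma_set_afonly(id, 1);
 *	rdma_bind_addr(id, (struct sockaddr *)&sin);
 *	rdma_listen(id, 16);
 */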
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);

static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	lockdep_assert_held(&lock);

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

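/*
 * For AF_IB the port number lives in the low 16 bits of the 64-bit service
 * ID. As an illustration of the packing done in cma_bind_port() above: with
 * a masked SID of 0x0000000001060000 and port 4791 (0x12b7), the bound SID
 * becomes 0x00000000010612b7 and the mask is widened to all ones.
 */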
static int cma_alloc_port(enum rdma_ucm_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	lockdep_assert_held(&lock);

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
			   snum);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = snum;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}

static int cma_port_is_unique(struct rdma_bind_list *bind_list,
			      struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *daddr = cma_dst_addr(id_priv);
	struct sockaddr *saddr = cma_src_addr(id_priv);
	__be16 dport = cma_port(daddr);

	lockdep_assert_held(&lock);

	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
		struct sockaddr *cur_saddr = cma_src_addr(cur_id);
		__be16 cur_dport = cma_port(cur_daddr);

		if (id_priv == cur_id)
			continue;

		/* different dest port -> unique */
		if (!cma_any_port(daddr) &&
		    !cma_any_port(cur_daddr) &&
		    (dport != cur_dport))
			continue;

		/* different src address -> unique */
		if (!cma_any_addr(saddr) &&
		    !cma_any_addr(cur_saddr) &&
		    cma_addr_cmp(saddr, cur_saddr))
			continue;

		/* different dst address -> unique */
		if (!cma_any_addr(daddr) &&
		    !cma_any_addr(cur_daddr) &&
		    cma_addr_cmp(daddr, cur_daddr))
			continue;

		return -EADDRNOTAVAIL;
	}
	return 0;
}

static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
			      struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	lockdep_assert_held(&lock);

	inet_get_local_port_range(net, &low, &high);
	remaining = (high - low) + 1;
	rover = get_random_u32_inclusive(low, remaining + low - 1);
retry:
	if (last_used_port != rover) {
		struct rdma_bind_list *bind_list;
		int ret;

		bind_list = cma_ps_find(net, ps, (unsigned short)rover);

		if (!bind_list) {
			ret = cma_alloc_port(ps, id_priv, rover);
		} else {
			ret = cma_port_is_unique(bind_list, id_priv);
			if (!ret)
				cma_bind_port(bind_list, id_priv);
		}
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}

/*
 * Check that the requested port is available. This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port. In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	lockdep_assert_held(&lock);

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if (reuseaddr && cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}

static int cma_use_port(enum rdma_ucm_port_space ps,
			struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	lockdep_assert_held(&lock);

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}

static enum rdma_ucm_port_space
cma_select_inet_ps(struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
	case RDMA_PS_IB:
		return id_priv->id.ps;
	default:
		return 0;
	}
}

static enum rdma_ucm_port_space
cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_ucm_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}

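/*
 * Illustration of the selection in cma_select_ib_ps() above: an AF_IB
 * source whose masked service ID matches the RDMA_IB_IP_PS_TCP prefix maps
 * onto RDMA_PS_TCP, and the SID is rewritten to <TCP prefix> | <port>, so
 * the same bind tables back both AF_IB and IPv4/IPv6 users of a port space.
 */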
static int cma_get_port(struct rdma_id_private *id_priv)
{
	enum rdma_ucm_port_space ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
		struct sockaddr_in any_in = {
			.sin_family = AF_INET,
			.sin_addr.s_addr = htonl(INADDR_ANY),
		};

		/* For a well-behaved ULP the state will be RDMA_CM_IDLE */
		ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
		if (ret)
			return ret;
		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
					   RDMA_CM_LISTEN)))
			return -EINVAL;
	}

	/*
	 * Once the ID reaches RDMA_CM_LISTEN it is no longer allowed to be
	 * reusable, and it has to be unique in the bind list.
	 */
	if (id_priv->reuseaddr) {
		mutex_lock(&lock);
		ret = cma_check_port(id_priv->bind_list, id_priv, 0);
		if (!ret)
			id_priv->reuseaddr = 0;
		mutex_unlock(&lock);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else {
		ret = cma_listen_on_all(id_priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	id_priv->backlog = 0;
	/*
	 * None of the failure paths that lead here allow the req_handlers to
	 * have run, so it is safe to move the state back.
	 */
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

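/*
 * Typical passive-side sequence (a sketch; a real ULP checks every return
 * value and handles events from its rdma_cm event handler):
 *
 *	id = rdma_create_id(net, handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
 *	rdma_bind_addr(id, (struct sockaddr *)&listen_addr);
 *	rdma_listen(id, 16);
 *	// RDMA_CM_EVENT_CONNECT_REQUEST then arrives in the handler, where
 *	// the ULP creates a QP and calls rdma_accept().
 */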
static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
			      struct sockaddr *addr, const struct sockaddr *daddr)
{
	struct sockaddr *id_daddr;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev_by_src_ip(id_priv);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6) {
			struct net *net = id_priv->id.route.addr.dev_addr.net;

			id_priv->afonly = net->ipv6.sysctl.bindv6only;
		}
#endif
	}
	id_daddr = cma_dst_addr(id_priv);
	if (daddr != id_daddr)
		memcpy(id_daddr, daddr, rdma_addr_size(addr));
	id_daddr->sa_family = addr->sa_family;

	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	if (!cma_any_addr(addr))
		rdma_restrack_add(&id_priv->res);
	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 const struct sockaddr *dst_addr)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	struct sockaddr_storage zero_sock = {};

	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);

	/*
	 * When the src_addr is not specified, automatically supply a wildcard
	 * address in the destination's family.
	 */
	zero_sock.ss_family = dst_addr->sa_family;
	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *src_addr6 =
			(struct sockaddr_in6 *)&zero_sock;
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *)dst_addr;

		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
			id->route.addr.dev_addr.bound_dev_if =
				dst_addr6->sin6_scope_id;
	} else if (dst_addr->sa_family == AF_IB) {
		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
			((struct sockaddr_ib *)dst_addr)->sib_pkey;
	}
	return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
}

/*
 * If required, resolve the source address for bind and leave the id_priv in
 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
 * calls made by the ULP: a previously bound ID will not be re-bound, and
 * src_addr is ignored.
 */
static int resolve_prepare_src(struct rdma_id_private *id_priv,
			       struct sockaddr *src_addr,
			       const struct sockaddr *dst_addr)
{
	int ret;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
		/* For a well-behaved ULP the state will be RDMA_CM_IDLE */
		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
		if (ret)
			return ret;
		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
					   RDMA_CM_ADDR_QUERY)))
			return -EINVAL;
	} else {
		memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	}

	if (cma_family(id_priv) != dst_addr->sa_family) {
		ret = -EINVAL;
		goto err_state;
	}
	return 0;

err_state:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	return ret;
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
	if (ret)
		return ret;

	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			/*
			 * The FSM can return to RDMA_CM_ADDR_BOUND after
			 * rdma_resolve_ip() is called, e.g. through the error
			 * path in addr_handler(). If this happens, the
			 * existing request must be canceled before issuing a
			 * new one. Since canceling a request is a bit slow
			 * and this oddball path is rare, keep track once a
			 * request has been issued. The tracking flag is
			 * permanent state, since this is the only cancel and
			 * it sits immediately before rdma_resolve_ip().
			 */
			if (id_priv->used_resolve_ip)
				rdma_addr_cancel(&id->route.addr.dev_addr);
			else
				id_priv->used_resolve_ip = 1;
			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
					      &id->route.addr.dev_addr,
					      timeout_ms, addr_handler,
					      false, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

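/*
 * Typical active-side sequence (a sketch; each step completes through the
 * event handler before the next call is made):
 *
 *	rdma_resolve_addr(id, NULL, (struct sockaddr *)&server, 2000);
 *	// on RDMA_CM_EVENT_ADDR_RESOLVED:
 *	rdma_resolve_route(id, 2000);
 *	// on RDMA_CM_EVENT_ROUTE_RESOLVED:
 *	rdma_connect_locked(id, &conn_param);
 */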
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
}
EXPORT_SYMBOL(rdma_bind_addr);

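/*
 * Fill the RDMA CM private-data header carried in front of the ULP's private
 * data on IB connection requests: a version byte, an IP-version nibble, the
 * source port, and the source/destination addresses. For AF_IB no header is
 * written (cma_user_data_offset() reserves no room for one).
 */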
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}

static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				const struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event = {};
	const struct ib_cm_sidr_rep_event_param *rep =
				&ib_event->param.sidr_rep_rcvd;
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
		goto out;

	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
					     event.status);
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_attr_from_path(id_priv->id.device,
					  id_priv->id.port_num,
					  id_priv->id.route.path_rec,
					  &event.param.ud.ah_attr,
					  rep->sgid_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = cma_cm_event_handler(id_priv, &event);

	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		destroy_id_handler_unlock(id_priv);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id *id;
	void *private_data;
	u8 offset;
	int ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	trace_cm_send_sidr_req(id_priv);
	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}

static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id *id;
	u8 offset;
	int ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	req.primary_path_inbound = route->path_rec_inbound;
	req.primary_path_outbound = route->path_rec_outbound;
	if (route->num_pri_alt_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
	/* Alternate path SGID attribute currently unsupported */
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;
	req.ece.vendor_id = id_priv->ece.vendor_id;
	req.ece.attr_mod = id_priv->ece.attr_mod;

	trace_cm_send_req(id_priv);
	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	mutex_lock(&id_priv->qp_mutex);
	cm_id->tos = id_priv->tos;
	cm_id->tos_set = id_priv->tos_set;
	mutex_unlock(&id_priv->qp_mutex);

	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

/**
 * rdma_connect_locked - Initiate an active connection request.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 *
 * Same as rdma_connect() but can only be called from the
 * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
 */
int rdma_connect_locked(struct rdma_cm_id *id,
			struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = cma_connect_iw(id_priv, conn_param);
	} else {
		ret = -ENOSYS;
	}
	if (ret)
		goto err_state;
	return 0;
err_state:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect_locked);

/**
 * rdma_connect - Initiate an active connection request.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 *
 * Users must have resolved a route for the rdma_cm_id to connect with by
 * having called rdma_resolve_route before calling this routine.
 *
 * This call will either connect to a remote QP or obtain remote QP
 * information for unconnected rdma_cm_id's. The actual operation is based on
 * the rdma_cm_id's port space.
 */
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	ret = rdma_connect_locked(id, conn_param);
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);

/**
 * rdma_connect_ece - Initiate an active connection request with ECE data.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 * @ece: ECE parameters
 *
 * See rdma_connect() explanation.
 */
int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		     struct rdma_ucm_ece *ece)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	id_priv->ece.vendor_id = ece->vendor_id;
	id_priv->ece.attr_mod = ece->attr_mod;

	return rdma_connect(id, conn_param);
}
EXPORT_SYMBOL(rdma_connect_ece);

static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;
	rep.ece.vendor_id = id_priv->ece.vendor_id;
	rep.ece.attr_mod = id_priv->ece.attr_mod;

	trace_cm_send_rep(id_priv);
	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	if (!conn_param)
		return -EINVAL;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		if (qkey)
			ret = cma_set_qkey(id_priv, qkey);
		else
			ret = cma_set_default_qkey(id_priv);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;

		rep.ece.vendor_id = id_priv->ece.vendor_id;
		rep.ece.attr_mod = id_priv->ece.attr_mod;
	}

	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	trace_cm_send_sidr_rep(id_priv);
	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

/**
 * rdma_accept - Called to accept a connection request or response.
 * @id: Connection identifier associated with the request.
 * @conn_param: Information needed to establish the connection. This must be
 *   provided if accepting a connection request. If accepting a connection
 *   response, this parameter must be NULL.
 *
 * Typically, this routine is only called by the listener to accept a
 * connection request. It must also be called on the active side of a
 * connection if the user is performing their own QP transitions.
 *
 * In the case of error, a reject message is sent to the remote side and the
 * state of the qp associated with the id is modified to error, such that any
 * previously posted receive buffers would be flushed.
 *
 * This function is for use by kernel ULPs and must be called from under the
 * handler callback.
 */
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	lockdep_assert_held(&id_priv->handler_mutex);

	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = cma_accept_iw(id_priv, conn_param);
	} else {
		ret = -ENOSYS;
	}
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		    struct rdma_ucm_ece *ece)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	id_priv->ece.vendor_id = ece->vendor_id;
	id_priv->ece.attr_mod = ece->attr_mod;

	return rdma_accept(id, conn_param);
}
EXPORT_SYMBOL(rdma_accept_ece);

void rdma_lock_handler(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_lock(&id_priv->handler_mutex);
}
EXPORT_SYMBOL(rdma_lock_handler);

void rdma_unlock_handler(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_unlock(&id_priv->handler_mutex);
}
EXPORT_SYMBOL(rdma_unlock_handler);

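/*
 * rdma_accept() requires the handler_mutex, which is normally held because
 * the ULP accepts from inside its event handler. A ULP that defers the
 * accept to another context can use the lock/unlock helpers above, roughly:
 *
 *	rdma_lock_handler(id);
 *	ret = rdma_accept(id, &conn_param);
 *	rdma_unlock_handler(id);
 */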
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len, u8 reason)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		} else {
			trace_cm_send_rej(id_priv);
			ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
					     private_data, private_data_len);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else {
		ret = -ENOSYS;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_reject);

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		trace_cm_disconnect(id_priv);
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
			if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
				trace_cm_sent_drep(id_priv);
		} else {
			trace_cm_sent_dreq(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
			      struct ib_sa_multicast *multicast,
			      struct rdma_cm_event *event,
			      struct cma_multicast *mc)
{
	struct rdma_dev_addr *dev_addr;
	enum ib_gid_type gid_type;
	struct net_device *ndev;

	if (status)
		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
				     status);

	event->status = status;
	event->param.ud.private_data = mc->context;
	if (status) {
		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
		return;
	}

	dev_addr = &id_priv->id.route.addr.dev_addr;
	ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	gid_type =
		id_priv->cma_dev
			->default_gid_type[id_priv->id.port_num -
					   rdma_start_port(
						   id_priv->cma_dev->device)];

	event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
	if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
				     &multicast->rec, ndev, gid_type,
				     &event->param.ud.ah_attr)) {
		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
		goto out;
	}

	event->param.ud.qp_num = 0xFFFFFF;
	event->param.ud.qkey = id_priv->qkey;

out:
	dev_put(ndev);
}

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct cma_multicast *mc = multicast->context;
	struct rdma_id_private *id_priv = mc->id_priv;
	struct rdma_cm_event event = {};
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
		goto out;

	ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	if (!ret) {
		cma_make_mc_event(status, id_priv, multicast, &event, mc);
		ret = cma_cm_event_handler(id_priv, &event);
	}
	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
	WARN_ON(ret);

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

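/*
 * Derive the MGID from the requested address: a wildcard maps to the zero
 * MGID, an SA-assigned IPv6 MGID or AF_IB address is used as-is, and other
 * IPv4/IPv6 addresses go through the standard IB multicast mapping, tagged
 * with the RDMA CM signature for RDMA_PS_UDP.
 */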
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}

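/*
 * Join an IB multicast group through the SA: start from the port's
 * broadcast group parameters, fill in the requested MGID, qkey, pkey and
 * join state, then hand the request to ib_sa_join_multicast().
 */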
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	if (!id_priv->qkey) {
		ret = cma_set_default_qkey(id_priv);
		if (ret)
			return ret;
	}

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
					 id_priv->id.port_num, &rec, comp_mask,
					 GFP_KERNEL, cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->sa_mc);
}

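/*
 * Build the MGID for RoCE. IPv6 addresses are used directly; IPv4
 * addresses are embedded in the low bytes of the GID (::ffff:a.b.c.d
 * form), with the leading prefix bytes set to ff0e for RoCEv1 and
 * zeroed for RoCEv2 (UDP encapsulation).
 */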
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] =
			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
		mgid->raw[1] =
			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}

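/*
 * RoCE multicast "join": no SA is involved, so the mcmember record is
 * built locally (MGID, MTU, hop limit, rate). For RoCEv2 IPv4 full
 * members an IGMP join is sent, and completion is reported through a
 * queued work item rather than an SA callback.
 */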
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	struct ib_sa_multicast ib = {};
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr(addr))
		return -EINVAL;

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);

	ib.rec.pkey = cpu_to_be16(0xffff);
	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	if (!ndev)
		return -ENODEV;

	ib.rec.rate = IB_RATE_PORT_CURRENT;
	ib.rec.hop_limit = 1;
	ib.rec.mtu = iboe_get_mtu(ndev->mtu);

	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &ib.rec.mgid,
						    true);
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !ib.rec.mtu)
		return err ?: -EINVAL;

	if (!id_priv->qkey)
		cma_set_default_qkey(id_priv);

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &ib.rec.port_gid);
	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
	cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
	queue_work(cma_wq, &mc->iboe_join.work);
	return 0;
}

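/*
 * Join a multicast group on a UD id with a bound or resolved address.
 * The request is tracked on the id's mc_list, and completion is reported
 * through RDMA_CM_EVENT_MULTICAST_JOIN or _MULTICAST_ERROR.
 */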
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	struct cma_multicast *mc;
	int ret;

	/* Not supported for kernel QPs */
	if (WARN_ON(id->qp))
		return -EINVAL;

	/* ULP is calling this wrong. */
	if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	if (id_priv->id.qp_type != IB_QPT_UD)
		return -EINVAL;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->join_state = join_state;

	if (rdma_protocol_roce(id->device, id->port_num)) {
		ret = cma_iboe_join_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
		ret = cma_join_ib_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else {
		ret = -ENOSYS;
		goto out_err;
	}

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	return 0;
out_err:
	kfree(mc);
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

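/*
 * Leave a previously joined multicast group: find the matching entry on
 * the id's mc_list, unlink it and tear the membership down.
 */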
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
			continue;
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);

		WARN_ON(id_priv->cma_dev->device != id->device);
		destroy_mc(id_priv, mc);
		return;
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

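/*
 * Called for each id after a bonding failover: if the id is bound to the
 * affected device and its cached source hardware address no longer
 * matches, queue RDMA_CM_EVENT_ADDR_CHANGE so the ULP can re-resolve.
 */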
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		cma_id_get(id_priv);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!netif_is_bond_master(ndev))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

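/*
 * Work item queued by the netevent notifier when a neighbour update
 * invalidates an id's cached destination: report
 * RDMA_CM_EVENT_UNREACHABLE to the ULP and drop the reference taken
 * when the work was queued.
 */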
static void cma_netevent_work_handler(struct work_struct *_work)
{
	struct rdma_id_private *id_priv =
		container_of(_work, struct rdma_id_private, id.net_work);
	struct rdma_cm_event event = {};

	mutex_lock(&id_priv->handler_mutex);

	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
		goto out_unlock;

	event.event = RDMA_CM_EVENT_UNREACHABLE;
	event.status = -ETIMEDOUT;

	if (cma_cm_event_handler(id_priv, &event)) {
		__acquire(&id_priv->handler_mutex);
		id_priv->cm_id.ib = NULL;
		cma_id_put(id_priv);
		destroy_id_handler_unlock(id_priv);
		return;
	}

out_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	cma_id_put(id_priv);
}

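/*
 * Neighbour-update notifier: look up ids addressed to the updated
 * neighbour and, for those whose cached destination MAC no longer
 * matches, queue unreachable work. This runs under a spinlock, so the
 * ULP event handler itself is deferred to cma_wq.
 */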
static int cma_netevent_callback(struct notifier_block *self,
				 unsigned long event, void *ctx)
{
	struct id_table_entry *ips_node = NULL;
	struct rdma_id_private *current_id;
	struct neighbour *neigh = ctx;
	unsigned long flags;

	if (event != NETEVENT_NEIGH_UPDATE)
		return NOTIFY_DONE;

	spin_lock_irqsave(&id_table_lock, flags);
	if (neigh->tbl->family == AF_INET6) {
		struct sockaddr_in6 neigh_sock_6;

		neigh_sock_6.sin6_family = AF_INET6;
		neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
					     (struct sockaddr *)&neigh_sock_6);
	} else if (neigh->tbl->family == AF_INET) {
		struct sockaddr_in neigh_sock_4;

		neigh_sock_4.sin_family = AF_INET;
		neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
					     (struct sockaddr *)&neigh_sock_4);
	} else
		goto out;

	if (!ips_node)
		goto out;

	list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
		if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
			    neigh->ha, ETH_ALEN))
			continue;
		INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
		cma_id_get(current_id);
		queue_work(cma_wq, &current_id->id.net_work);
	}
out:
	spin_unlock_irqrestore(&id_table_lock, flags);
	return NOTIFY_DONE;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static struct notifier_block cma_netevent_cb = {
	.notifier_call = cma_netevent_callback
};

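/*
 * Move an id into RDMA_CM_DEVICE_REMOVAL and deliver the DEVICE_REMOVAL
 * event. If the handler returns non-zero the id is destroyed here;
 * otherwise any outstanding operation is cancelled. Consumes the
 * reference taken by the caller.
 */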
static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
	enum rdma_cm_state state;
	unsigned long flags;

	mutex_lock(&id_priv->handler_mutex);
	/* Record that we want to remove the device */
	spin_lock_irqsave(&id_priv->lock, flags);
	state = id_priv->state;
	if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
		spin_unlock_irqrestore(&id_priv->lock, flags);
		mutex_unlock(&id_priv->handler_mutex);
		cma_id_put(id_priv);
		return;
	}
	id_priv->state = RDMA_CM_DEVICE_REMOVAL;
	spin_unlock_irqrestore(&id_priv->lock, flags);

	if (cma_cm_event_handler(id_priv, &event)) {
		/*
		 * At this point the ULP promises it won't call
		 * rdma_destroy_id() concurrently
		 */
		cma_id_put(id_priv);
		mutex_unlock(&id_priv->handler_mutex);
		trace_cm_id_destroy(id_priv);
		_destroy_id(id_priv, state);
		return;
	}
	mutex_unlock(&id_priv->handler_mutex);

	/*
	 * If this races with destroy then the thread that first assigns state
	 * to a destroying does the cancel.
	 */
	cma_cancel_operation(id_priv, state);
	cma_id_put(id_priv);
}

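/*
 * Detach every id still bound to the departing device, delivering
 * DEVICE_REMOVAL to each, then wait for all device references to drop.
 */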
static void cma_process_remove(struct cma_device *cma_dev)
{
	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		struct rdma_id_private *id_priv = list_first_entry(
			&cma_dev->id_list, struct rdma_id_private, device_item);

		list_del_init(&id_priv->listen_item);
		list_del_init(&id_priv->device_item);
		cma_id_get(id_priv);
		mutex_unlock(&lock);

		cma_send_device_removal_put(id_priv);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_dev_put(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static bool cma_supported(struct ib_device *device)
{
	u32 i;

	rdma_for_each_port(device, i) {
		if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
			return true;
	}
	return false;
}

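/*
 * ib_client add callback: allocate per-device CMA state (default GID type
 * and RoCE ToS for each port) and replay any wildcard listens on the new
 * device.
 */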
static int cma_add_one(struct ib_device *device)
{
	struct rdma_id_private *to_destroy;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned long supported_gids = 0;
	int ret;
	u32 i;

	if (!cma_supported(device))
		return -EOPNOTSUPP;

	cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
	if (!cma_dev)
		return -ENOMEM;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		ret = -ENOMEM;
		goto free_cma_dev;
	}

	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_roce_tos),
					    GFP_KERNEL);
	if (!cma_dev->default_roce_tos) {
		ret = -ENOMEM;
		goto free_gid_type;
	}

	rdma_for_each_port (device, i) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				CMA_PREFERRED_ROCE_GID_TYPE;
		else
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				find_first_bit(&supported_gids, BITS_PER_LONG);
		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
	}

	init_completion(&cma_dev->comp);
	refcount_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
		if (ret)
			goto free_listen;
	}
	mutex_unlock(&lock);

	trace_cm_add_one(device);
	return 0;

free_listen:
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	/* cma_process_remove() will delete to_destroy */
	cma_process_remove(cma_dev);
	kfree(cma_dev->default_roce_tos);
free_gid_type:
	kfree(cma_dev->default_gid_type);

free_cma_dev:
	kfree(cma_dev);
	return ret;
}

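/*
 * ib_client remove callback: tear down every id on the device and free
 * the per-device state allocated in cma_add_one().
 */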
static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	trace_cm_remove_one(device);

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_roce_tos);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}

static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	xa_init(&pernet->tcp_ps);
	xa_init(&pernet->udp_ps);
	xa_init(&pernet->ipoib_ps);
	xa_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	WARN_ON(!xa_empty(&pernet->tcp_ps));
	WARN_ON(!xa_empty(&pernet->udp_ps));
	WARN_ON(!xa_empty(&pernet->ipoib_ps));
	WARN_ON(!xa_empty(&pernet->ib_ps));
}

static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};

static int __init cma_init(void)
{
	int ret;

	/*
	 * There is a rare lock ordering dependency in cma_netdev_callback()
	 * that only happens when bonding is enabled. Teach lockdep that rtnl
	 * must never be nested under lock so it can find these without having
	 * to test with bonding.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		rtnl_lock();
		mutex_lock(&lock);
		mutex_unlock(&lock);
		rtnl_unlock();
	}

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	register_netdevice_notifier(&cma_nb);
	register_netevent_notifier(&cma_netevent_cb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	ret = cma_configfs_init();
	if (ret)
		goto err_ib;

	return 0;

err_ib:
	ib_unregister_client(&cma_client);
err:
	unregister_netevent_notifier(&cma_netevent_cb);
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ib_unregister_client(&cma_client);
	unregister_netevent_notifier(&cma_netevent_cb);
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);