// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sysfs.h>
#include "cm_msgs.h"
#include "core_priv.h"
#include "cm_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP] = "no QP",
	[IB_CM_REJ_NO_EEC] = "no EEC",
	[IB_CM_REJ_NO_RESOURCES] = "no resources",
	[IB_CM_REJ_TIMEOUT] = "timeout",
	[IB_CM_REJ_UNSUPPORTED] = "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
	[IB_CM_REJ_STALE_CONN] = "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
	[IB_CM_REJ_INVALID_GID] = "invalid GID",
	[IB_CM_REJ_INVALID_LID] = "invalid LID",
	[IB_CM_REJ_INVALID_SL] = "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT] = "port redirect",
	[IB_CM_REJ_INVALID_MTU] = "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
		"vendor option is not supported",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

struct cm_id_private;
struct cm_work;
static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len);

static struct ib_client cm_client = {
	.name = "cm",
	.add = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct xarray local_id_table;
	u32 local_id_next;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

struct cm_counter_attribute {
	struct ib_port_attribute attr;
	unsigned short group;
	unsigned short index;
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u32 port_num;
	atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
};

struct cm_device {
	struct kref kref;
	struct list_head list;
	spinlock_t mad_agent_lock;
	struct ib_device *ib_device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[];
};

struct cm_av {
	struct cm_port *port;
	struct rdma_ah_attr ah_attr;
	u16 dlid_datapath;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[];
};

struct cm_timewait_info {
	struct cm_work work;
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	u32 sidr_slid;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock.
	 */
	int listen_sharecount;
	struct rcu_head rcu;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head work_list;
	atomic_t work_count;

	struct rdma_ucm_ece ece;
};

static void cm_dev_release(struct kref *kref)
{
	struct cm_device *cm_dev = container_of(kref, struct cm_device, kref);
	u32 i;

	rdma_for_each_port(cm_dev->ib_device, i)
		kfree(cm_dev->port[i - 1]);

	kfree(cm_dev);
}

static void cm_device_put(struct cm_device *cm_dev)
{
	kref_put(&cm_dev->kref, cm_dev_release);
}

static void cm_work_handler(struct work_struct *work);

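/*
 * Drop a reference on a cm_id_priv; the final put completes ->comp so that
 * cm_destroy_id() can finish tearing down the ID.
 */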
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

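/*
 * Allocate a MAD send buffer and address handle for an outbound CM message.
 * Called with cm_id_priv->lock held; the returned buffer holds a reference
 * on cm_id_priv that is dropped by cm_free_msg()/cm_free_priv_msg().
 */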
static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	lockdep_assert_held(&cm_id_priv->lock);

	if (!cm_id_priv->av.port)
		return ERR_PTR(-EINVAL);

	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		m = ERR_PTR(-EINVAL);
		goto out;
	}

	ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
	if (IS_ERR(ah)) {
		m = ERR_CAST(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah, 0);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	refcount_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;

out:
	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	return m;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	struct cm_id_private *cm_id_priv = msg->context[0];

	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	cm_deref_id(cm_id_priv);
	ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *
cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg;

	lockdep_assert_held(&cm_id_priv->lock);

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg))
		return msg;
	cm_id_priv->msg = msg;
	return msg;
}

static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
{
	struct cm_id_private *cm_id_priv = msg->context[0];

	lockdep_assert_held(&cm_id_priv->lock);

	if (!WARN_ON(cm_id_priv->msg != msg))
		cm_id_priv->msg = NULL;

	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	cm_deref_id(cm_id_priv);
	ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
							   struct ib_mad_recv_wc *mad_recv_wc)
{
	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				  GFP_ATOMIC,
				  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	msg->ah = ah;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		ib_free_send_mad(m);
		return ret;
	}

	*msg = m;
	return 0;
}

static void cm_free_response_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	ib_free_send_mad(msg);
}

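/*
 * Duplicate caller-provided private data. Returns NULL when there is nothing
 * to copy and ERR_PTR(-ENOMEM) if the allocation fails.
 */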
static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_set_av_port(struct cm_av *av, struct cm_port *port)
{
	struct cm_port *old_port = av->port;

	if (old_port == port)
		return;

	av->port = port;
	if (old_port)
		cm_device_put(old_port->cm_dev);
	if (port)
		kref_get(&port->cm_dev->kref);
}

static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
			       struct rdma_ah_attr *ah_attr, struct cm_av *av)
{
	cm_set_av_port(av, port);
	av->pkey_index = wc->pkey_index;
	rdma_move_ah_attr(&av->ah_attr, ah_attr);
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	cm_set_av_port(av, port);
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;

	if (attr) {
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			if (cm_dev->ib_device == attr->device) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
	} else {
		/* The SGID attribute can be NULL in the following
		 * conditions:
		 * (a) Alternative path
		 * (b) IB link layer without GRH
		 * (c) LAP send messages
		 */
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			attr = rdma_find_gid(cm_dev->ib_device,
					     &path->sgid,
					     sa_conv_pathrec_to_gid_type(path),
					     NULL);
			if (!IS_ERR(attr)) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
		if (port)
			rdma_put_gid_attr(attr);
	}
	return port;
}

static int cm_init_av_by_path(struct sa_path_rec *path,
			      const struct ib_gid_attr *sgid_attr,
			      struct cm_av *av)
{
	struct rdma_ah_attr new_ah_attr;
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path, sgid_attr);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	cm_set_av_port(av, port);

	/*
	 * av->ah_attr might be initialized based on wc or during
	 * request processing time, which might have a reference to sgid_attr.
	 * So initialize a new ah_attr on the stack.
	 * If initialization fails, the old ah_attr is used for sending any
	 * responses. If initialization is successful, then the new ah_attr
	 * is used by overwriting the old one, so that the right ah_attr
	 * can be used to return an error response.
	 */
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&new_ah_attr, sgid_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

/* Move av created by cm_init_av_by_path(), so av.dgid is not moved */
static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
{
	cm_set_av_port(dest, src->port);
	cm_set_av_port(src, NULL);
	dest->pkey_index = src->pkey_index;
	rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
	dest->timeout = src->timeout;
}

static void cm_destroy_av(struct cm_av *av)
{
	rdma_destroy_ah_attr(&av->ah_attr);
	cm_set_av_port(av, NULL);
}

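/*
 * Recover the xarray index for a local ID by undoing the random XOR applied
 * when the ID was allocated.
 */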
static u32 cm_local_id(__be32 local_id)
{
	return (__force u32) (local_id ^ cm.random_id_operand);
}

static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	rcu_read_lock();
	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
	    !refcount_inc_not_zero(&cm_id_priv->refcount))
		cm_id_priv = NULL;
	rcu_read_unlock();

	return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

/*
 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 * if the new ID was inserted, NULL if it could not be inserted due to a
 * collision, or the existing cm_id_priv ready for shared usage.
 */
static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
					      ib_cm_handler shared_handler)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else {
			/*
			 * Sharing an ib_cm_id with different handlers is not
			 * supported
			 */
			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
			    cur_cm_id_priv->id.context ||
			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
				spin_unlock_irqrestore(&cm.lock, flags);
				return NULL;
			}
			refcount_inc(&cur_cm_id_priv->refcount);
			cur_cm_id_priv->listen_sharecount++;
			spin_unlock_irqrestore(&cm.lock, flags);
			return cur_cm_id_priv;
		}
	}
	cm_id_priv->listen_sharecount++;
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return cm_id_priv;
}

static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else {
			refcount_inc(&cm_id_priv->refcount);
			return cm_id_priv;
		}
	}
	return NULL;
}

static struct cm_timewait_info *
cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
					       __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *res = NULL;

	spin_lock_irq(&cm.lock);
	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else {
			res = cm_acquire_id(timewait_info->work.local_id,
					    timewait_info->work.remote_id);
			break;
		}
	}
	spin_unlock_irq(&cm.lock);
	return res;
}

static struct cm_timewait_info *
cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private *
cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid)
				link = &(*link)->rb_left;
			else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
					      ib_cm_handler cm_handler,
					      void *context)
{
	struct cm_id_private *cm_id_priv;
	u32 id;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;

	RB_CLEAR_NODE(&cm_id_priv->service_node);
	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	refcount_set(&cm_id_priv->refcount, 1);

	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
			      &cm.local_id_next, GFP_KERNEL);
	if (ret < 0)
		goto error;
	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;

	return cm_id_priv;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}

/*
 * Make the ID visible to the MAD handlers and other threads that use the
 * xarray.
 */
static void cm_finalize_id(struct cm_id_private *cm_id_priv)
{
	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
		 cm_id_priv, GFP_ATOMIC);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	cm_finalize_id(cm_id_priv);
	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
				 struct cm_work *work)
	__releases(&cm_id_priv->lock)
{
	bool immediate;

	/*
	 * To deliver the event to the user callback we have to drop the
	 * spinlock; however, we need to ensure that the user callback is
	 * single threaded and receives events in temporal order. If there are
	 * already events being processed then thread new events onto a list,
	 * the thread currently processing will pick them up.
	 */
	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!immediate) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		/*
		 * This routine always consumes the incoming reference. Once
		 * queued to the work_list, a reference is held by the thread
		 * currently running cm_process_work(), so this reference is
		 * not needed.
		 */
		cm_deref_id(cm_id_priv);
	}
	spin_unlock_irq(&cm_id_priv->lock);

	if (immediate)
		cm_process_work(cm_id_priv, work);
}

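/*
 * IBA encodes timeouts as 4.096us * 2^N; e.g. N = 20 is roughly 4.3 seconds,
 * which the approximation below reports as 1 << (20 - 8) = 4096 ms.
 */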
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}

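/*
 * Unlink the timewait entry from the remote_id/remote_qpn lookup trees;
 * callers hold cm.lock.
 */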
static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;

	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_remove_remote(cm_id_priv);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The timewait_info is converted into a work and gets freed during
	 * cm_free_work() in cm_timewait_handler().
	 */
	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_remove_remote(cm_id_priv);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
						enum ib_cm_state old_state)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
	       cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	enum ib_cm_state old_state;
	struct cm_work *work;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irq(&cm_id_priv->lock);
	old_state = cm_id->state;
retest:
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_lock(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
			spin_unlock(&cm.lock);
			spin_unlock_irq(&cm_id_priv->lock);
			cm_deref_id(cm_id_priv);
			return;
		}
		cm_id->state = IB_CM_IDLE;
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		RB_CLEAR_NODE(&cm_id_priv->service_node);
		spin_unlock(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->msg);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		cm_send_sidr_rep_locked(cm_id_priv,
					&(struct ib_cm_sidr_rep_param){
						.status = IB_SIDR_REJECT });
		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
				   &cm_id_priv->id.device->node_guid,
				   sizeof(cm_id_priv->id.device->node_guid),
				   NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
		} else {
			cm_send_rej_locked(cm_id_priv,
					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
					   NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		goto retest;
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			cm_id->state = IB_CM_IDLE;
			break;
		}
		cm_send_dreq_locked(cm_id_priv, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		goto retest;
	case IB_CM_DREQ_RCVD:
		cm_send_drep_locked(cm_id_priv, NULL, 0);
		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
		goto retest;
	case IB_CM_TIMEWAIT:
		/*
		 * The cm_acquire_id in cm_timewait_handler will stop working
		 * once we do xa_erase below, so just move to idle here for
		 * consistency.
		 */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_IDLE:
		break;
	}
	WARN_ON(cm_id->state != IB_CM_IDLE);

	spin_lock(&cm.lock);
	/* Required for cleanup paths related to cm_req_handler() */
	if (cm_id_priv->timewait_info) {
		cm_remove_remote(cm_id_priv);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}

	WARN_ON(cm_id_priv->listen_sharecount);
	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock(&cm.lock);
	spin_unlock_irq(&cm_id_priv->lock);

	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
	cm_deref_id(cm_id_priv);
	do {
		ret = wait_for_completion_timeout(&cm_id_priv->comp,
						  msecs_to_jiffies(
						  CM_DESTROY_ID_WAIT_TIMEOUT));
		if (!ret) /* timeout happened */
			cm_destroy_id_wait_timeout(cm_id, old_state);
	} while (!ret);

	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);

	cm_destroy_av(&cm_id_priv->av);
	cm_destroy_av(&cm_id_priv->alt_av);
	kfree(cm_id_priv->private_data);
	kfree_rcu(cm_id_priv, rcu);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
{
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
	else
		cm_id_priv->id.service_id = service_id;

	return 0;
}

/**
 * ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 */
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_listen(cm_id_priv, service_id);
	if (ret)
		goto out;

	if (!cm_insert_listen(cm_id_priv, NULL)) {
		ret = -EBUSY;
		goto out;
	}

	cm_id_priv->id.state = IB_CM_LISTEN;
	ret = 0;

out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id. All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *listen_id_priv;
	struct cm_id_private *cm_id_priv;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	err = cm_init_listen(cm_id_priv, service_id);
	if (err) {
		ib_destroy_cm_id(&cm_id_priv->id);
		return ERR_PTR(err);
	}

	spin_lock_irq(&cm_id_priv->lock);
	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
	if (listen_id_priv != cm_id_priv) {
		spin_unlock_irq(&cm_id_priv->lock);
		ib_destroy_cm_id(&cm_id_priv->id);
		if (!listen_id_priv)
			return ERR_PTR(-EINVAL);
		return &listen_id_priv->id;
	}
	cm_id_priv->id.state = IB_CM_LISTEN;
	spin_unlock_irq(&cm_id_priv->lock);

	/*
	 * A listen ID does not need to be in the xarray since it does not
	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
	 * and does not enter timewait.
	 */

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

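/*
 * Build the MAD transaction ID: the sending MAD agent's hi_tid in the upper
 * 32 bits (when the port is still bound) and the local comm ID in the lower
 * 32 bits.
 */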
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
	u64 hi_tid = 0, low_tid;

	lockdep_assert_held(&cm_id_priv->lock);

	low_tid = (u64)cm_id_priv->id.local_id;
	if (!cm_id_priv->av.port)
		return cpu_to_be64(low_tid);

	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	if (cm_id_priv->av.port->mad_agent)
		hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
				  __be64 tid, u32 attr_mod)
{
	cm_format_mad_hdr(hdr, attr_id, tid);
	hdr->attr_mod = cpu_to_be32(attr_mod);
}

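/*
 * Fill a REQ MAD from the caller-supplied connection parameters and the
 * primary/alternate path records.
 */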
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;
	bool pri_ext = false;
	__be16 lid;

	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
					      pri_path->opa.slid);

	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			      cm_form_tid(cm_id_priv), param->ece.attr_mod);

	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
		be64_to_cpu(cm_id_priv->id.device->node_guid));
	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
		param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
		param->local_cm_response_timeout);
	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
		be16_to_cpu(param->primary_path->pkey));
	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
		param->primary_path->mtu);
	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
			param->responder_resources);
		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
			param->rnr_retry_count);
		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
	}

	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
		pri_path->sgid;
	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
		pri_path->dgid;
	if (pri_ext) {
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
	}
	if (pri_path->hop_limit <= 1) {
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					    htons(ntohl(sa_path_get_slid(
						    pri_path)))));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					    htons(ntohl(sa_path_get_dlid(
						    pri_path)))));
	} else {

		if (param->primary_path_inbound) {
			lid = param->primary_path_inbound->ib.dlid;
			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(lid));
		} else
			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));

		/* Work-around until there's a way to obtain remote LID info */
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
	}
	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
		be32_to_cpu(pri_path->flow_label));
	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
		(pri_path->hop_limit <= 1));
	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		bool alt_ext = false;

		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
						      alt_path->opa.slid);

		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
			alt_path->sgid;
		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
			alt_path->dgid;
		if (alt_ext) {
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
		}
		if (alt_path->hop_limit <= 1) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_slid(
							  alt_path)))));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_dlid(
							  alt_path)))));
		} else {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
		}
		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
			be32_to_cpu(alt_path->flow_label));
		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
			alt_path->traffic_class);
		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
			alt_path->hop_limit);
		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
			(alt_path->hop_limit <= 1));
		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}
	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
			    param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_av av = {}, alt_av = {};
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
		return ret;
	}

	ret = cm_init_av_by_path(param->primary_path,
				 param->ppath_sgid_attr, &av);
	if (ret)
		return ret;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path, NULL,
					 &alt_av);
		if (ret) {
			cm_destroy_av(&av);
			return ret;
		}
	}
	cm_id->service_id = param->service_id;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	cm_move_av_from_path(&cm_id_priv->av, &av);
	if (param->primary_path_outbound)
		cm_id_priv->av.dlid_datapath =
			be16_to_cpu(param->primary_path_outbound->ib.dlid);

	if (param->alternate_path)
		cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);

	msg = cm_alloc_priv_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
		goto out_unlock;
	}

	req_msg = (struct cm_req_msg *)msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));

	trace_icm_send_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto out_free;
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;
out_free:
	cm_free_priv_msg(msg);
out_unlock:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

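/*
 * Build and send a REJ directly from a received MAD, swapping the local and
 * remote comm IDs taken from the incoming message; used on paths where the
 * message cannot be handled under an existing cm_id.
 */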
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
	IBA_SET(CM_REJ_REASON, rej_msg, reason);

	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	trace_icm_issue_rej(
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_response_msg(msg);

	return ret;
}

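/*
 * A REQ carries an alternate path if the alternate local LID is non-zero or
 * the alternate local GID is an OPA GID.
 */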
1632static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
1633{
1634 return ((cpu_to_be16(
1635 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
1636 (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1637 req_msg))));
1638}
1639
1640static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
1641 struct sa_path_rec *path, union ib_gid *gid)
1642{
1643 if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(device: ib_device, port_num))
1644 path->rec_type = SA_PATH_REC_TYPE_OPA;
1645 else
1646 path->rec_type = SA_PATH_REC_TYPE_IB;
1647}
1648
1649static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
1650 struct sa_path_rec *primary_path,
1651 struct sa_path_rec *alt_path,
1652 struct ib_wc *wc)
1653{
1654 u32 lid;
1655
1656 if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1657 sa_path_set_dlid(rec: primary_path, dlid: wc->slid);
1658 sa_path_set_slid(rec: primary_path,
1659 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
1660 req_msg));
1661 } else {
1662 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1663 CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
1664 sa_path_set_dlid(rec: primary_path, dlid: lid);
1665
1666 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1667 CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
1668 sa_path_set_slid(rec: primary_path, slid: lid);
1669 }
1670
1671 if (!cm_req_has_alt_path(req_msg))
1672 return;
1673
1674 if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1675 sa_path_set_dlid(rec: alt_path,
1676 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
1677 req_msg));
1678 sa_path_set_slid(rec: alt_path,
1679 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
1680 req_msg));
1681 } else {
1682 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1683 CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
1684 sa_path_set_dlid(rec: alt_path, dlid: lid);
1685
1686 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1687 CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
1688 sa_path_set_slid(rec: alt_path, slid: lid);
1689 }
1690}
1691
1692static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1693 struct sa_path_rec *primary_path,
1694 struct sa_path_rec *alt_path,
1695 struct ib_wc *wc)
1696{
1697 primary_path->dgid =
1698 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
1699 primary_path->sgid =
1700 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
1701 primary_path->flow_label =
1702 cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
1703 primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
1704 primary_path->traffic_class =
1705 IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
1706 primary_path->reversible = 1;
1707 primary_path->pkey =
1708 cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1709 primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
1710 primary_path->mtu_selector = IB_SA_EQ;
1711 primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1712 primary_path->rate_selector = IB_SA_EQ;
1713 primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
1714 primary_path->packet_life_time_selector = IB_SA_EQ;
1715 primary_path->packet_life_time =
1716 IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
1717 primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1718 primary_path->service_id =
1719 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1720 if (sa_path_is_roce(rec: primary_path))
1721 primary_path->roce.route_resolved = false;
1722
1723 if (cm_req_has_alt_path(req_msg)) {
1724 alt_path->dgid = *IBA_GET_MEM_PTR(
1725 CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
1726 alt_path->sgid = *IBA_GET_MEM_PTR(
1727 CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
1728 alt_path->flow_label = cpu_to_be32(
1729 IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
1730 alt_path->hop_limit =
1731 IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
1732 alt_path->traffic_class =
1733 IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
1734 alt_path->reversible = 1;
1735 alt_path->pkey =
1736 cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1737 alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
1738 alt_path->mtu_selector = IB_SA_EQ;
1739 alt_path->mtu =
1740 IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1741 alt_path->rate_selector = IB_SA_EQ;
1742 alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
1743 alt_path->packet_life_time_selector = IB_SA_EQ;
1744 alt_path->packet_life_time =
1745 IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
1746 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1747 alt_path->service_id =
1748 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1749
1750 if (sa_path_is_roce(alt_path))
1751 alt_path->roce.route_resolved = false;
1752 }
1753 cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
1754}
1755
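/* Return the P_Key selected by the pkey_index in the incoming MAD's work
 * completion, or 0 if the cached table lookup fails. */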
1756static u16 cm_get_bth_pkey(struct cm_work *work)
1757{
1758 struct ib_device *ib_dev = work->port->cm_dev->ib_device;
1759 u32 port_num = work->port->port_num;
1760 u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
1761 u16 pkey;
1762 int ret;
1763
1764 ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
1765 if (ret) {
1766 dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n",
1767 port_num, pkey_index, ret);
1768 return 0;
1769 }
1770
1771 return pkey;
1772}
1773
1774/**
1775 * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
1776 * ULPs (such as IPoIB) do not understand OPA GIDs and will
1777 * reject them as the local_gid will not match the sgid. Therefore,
1778 * change the pathrec's SGID to an IB SGID.
1779 *
1780 * @work: Work completion
1781 * @path: Path record
1782 */
1783static void cm_opa_to_ib_sgid(struct cm_work *work,
1784 struct sa_path_rec *path)
1785{
1786 struct ib_device *dev = work->port->cm_dev->ib_device;
1787 u32 port_num = work->port->port_num;
1788
1789 if (rdma_cap_opa_ah(dev, port_num) &&
1790 (ib_is_opa_gid(&path->sgid))) {
1791 union ib_gid sgid;
1792
1793 if (rdma_query_gid(dev, port_num, 0, &sgid)) {
1794 dev_warn(&dev->dev,
1795 "Error updating sgid in CM request\n");
1796 return;
1797 }
1798
1799 path->sgid = sgid;
1800 }
1801}
1802
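/* Translate a received REQ into the event reported to the listener. */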
1803static void cm_format_req_event(struct cm_work *work,
1804 struct cm_id_private *cm_id_priv,
1805 struct ib_cm_id *listen_id)
1806{
1807 struct cm_req_msg *req_msg;
1808 struct ib_cm_req_event_param *param;
1809
1810 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1811 param = &work->cm_event.param.req_rcvd;
1812 param->listen_id = listen_id;
1813 param->bth_pkey = cm_get_bth_pkey(work);
1814 param->port = cm_id_priv->av.port->port_num;
1815 param->primary_path = &work->path[0];
1816 cm_opa_to_ib_sgid(work, param->primary_path);
1817 if (cm_req_has_alt_path(req_msg)) {
1818 param->alternate_path = &work->path[1];
1819 cm_opa_to_ib_sgid(work, param->alternate_path);
1820 } else {
1821 param->alternate_path = NULL;
1822 }
1823 param->remote_ca_guid =
1824 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
1825 param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
1826 param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
1827 param->qp_type = cm_req_get_qp_type(req_msg);
1828 param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
1829 param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
1830 param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
1831 param->local_cm_response_timeout =
1832 IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
1833 param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
1834 param->remote_cm_response_timeout =
1835 IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
1836 param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
1837 param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
1838 param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
1839 param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
1840 param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
1841 param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
1842
1843 work->cm_event.private_data =
1844 IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
1845}
1846
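/*
 * Deliver the current event, then any queued events, to the consumer's
 * cm_handler.  A non-zero handler return destroys the cm_id.
 */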
1847static void cm_process_work(struct cm_id_private *cm_id_priv,
1848 struct cm_work *work)
1849{
1850 int ret;
1851
1852 /* We will typically only have the current event to report. */
1853 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1854 cm_free_work(work);
1855
1856 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1857 spin_lock_irq(&cm_id_priv->lock);
1858 work = cm_dequeue_work(cm_id_priv);
1859 spin_unlock_irq(&cm_id_priv->lock);
1860 if (!work)
1861 return;
1862
1863 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1864 &work->cm_event);
1865 cm_free_work(work);
1866 }
1867 cm_deref_id(cm_id_priv);
1868 if (ret)
1869 cm_destroy_id(&cm_id_priv->id, ret);
1870}
1871
1872static void cm_format_mra(struct cm_mra_msg *mra_msg,
1873 struct cm_id_private *cm_id_priv,
1874 enum cm_msg_response msg_mraed, u8 service_timeout,
1875 const void *private_data, u8 private_data_len)
1876{
1877 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1878 IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
1879 IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
1880 be32_to_cpu(cm_id_priv->id.local_id));
1881 IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
1882 be32_to_cpu(cm_id_priv->id.remote_id));
1883 IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
1884
1885 if (private_data && private_data_len)
1886 IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
1887 private_data_len);
1888}
1889
1890static void cm_format_rej(struct cm_rej_msg *rej_msg,
1891 struct cm_id_private *cm_id_priv,
1892 enum ib_cm_rej_reason reason, void *ari,
1893 u8 ari_length, const void *private_data,
1894 u8 private_data_len, enum ib_cm_state state)
1895{
1896 lockdep_assert_held(&cm_id_priv->lock);
1897
1898 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1899 IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1900 be32_to_cpu(cm_id_priv->id.remote_id));
1901
1902 switch (state) {
1903 case IB_CM_REQ_RCVD:
1904 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
1905 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1906 break;
1907 case IB_CM_MRA_REQ_SENT:
1908 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1909 be32_to_cpu(cm_id_priv->id.local_id));
1910 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1911 break;
1912 case IB_CM_REP_RCVD:
1913 case IB_CM_MRA_REP_SENT:
1914 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1915 be32_to_cpu(cm_id_priv->id.local_id));
1916 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
1917 break;
1918 default:
1919 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1920 be32_to_cpu(cm_id_priv->id.local_id));
1921 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
1922 CM_MSG_RESPONSE_OTHER);
1923 break;
1924 }
1925
1926 IBA_SET(CM_REJ_REASON, rej_msg, reason);
1927 if (ari && ari_length) {
1928 IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1929 IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1930 }
1931
1932 if (private_data && private_data_len)
1933 IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
1934 private_data_len);
1935}
1936
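/* Re-send the outstanding MRA, or a stale-connection REJ, in reply to a
 * duplicate REQ. */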
1937static void cm_dup_req_handler(struct cm_work *work,
1938 struct cm_id_private *cm_id_priv)
1939{
1940 struct ib_mad_send_buf *msg = NULL;
1941 int ret;
1942
1943 atomic_long_inc(
1944 &work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);
1945
1946 /* Quick state check to discard duplicate REQs. */
1947 spin_lock_irq(&cm_id_priv->lock);
1948 if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
1949 spin_unlock_irq(&cm_id_priv->lock);
1950 return;
1951 }
1952 spin_unlock_irq(&cm_id_priv->lock);
1953
1954 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1955 if (ret)
1956 return;
1957
1958 spin_lock_irq(&cm_id_priv->lock);
1959 switch (cm_id_priv->id.state) {
1960 case IB_CM_MRA_REQ_SENT:
1961 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1962 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1963 cm_id_priv->private_data,
1964 cm_id_priv->private_data_len);
1965 break;
1966 case IB_CM_TIMEWAIT:
1967 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
1968 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
1969 IB_CM_TIMEWAIT);
1970 break;
1971 default:
1972 goto unlock;
1973 }
1974 spin_unlock_irq(&cm_id_priv->lock);
1975
1976 trace_icm_send_dup_req(&cm_id_priv->id);
1977 ret = ib_post_send_mad(msg, NULL);
1978 if (ret)
1979 goto free;
1980 return;
1981
1982unlock: spin_unlock_irq(&cm_id_priv->lock);
1983free: cm_free_response_msg(msg);
1984}
1985
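/*
 * Match a new REQ against duplicates, stale connections and listeners.
 * Returns the listening cm_id_private, or NULL if the REQ was handled
 * (duplicate/stale) or no listener exists.
 */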
1986static struct cm_id_private *cm_match_req(struct cm_work *work,
1987 struct cm_id_private *cm_id_priv)
1988{
1989 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1990 struct cm_timewait_info *timewait_info;
1991 struct cm_req_msg *req_msg;
1992
1993 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1994
1995 /* Check for possible duplicate REQ. */
1996 spin_lock_irq(&cm.lock);
1997 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1998 if (timewait_info) {
1999 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2000 timewait_info->work.remote_id);
2001 spin_unlock_irq(&cm.lock);
2002 if (cur_cm_id_priv) {
2003 cm_dup_req_handler(work, cur_cm_id_priv);
2004 cm_deref_id(cur_cm_id_priv);
2005 }
2006 return NULL;
2007 }
2008
2009 /* Check for stale connections. */
2010 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2011 if (timewait_info) {
2012 cm_remove_remote(cm_id_priv);
2013 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2014 timewait_info->work.remote_id);
2015
2016 spin_unlock_irq(&cm.lock);
2017 cm_issue_rej(work->port, work->mad_recv_wc,
2018 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
2019 NULL, 0);
2020 if (cur_cm_id_priv) {
2021 ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2022 cm_deref_id(cur_cm_id_priv);
2023 }
2024 return NULL;
2025 }
2026
2027 /* Find matching listen request. */
2028 listen_cm_id_priv = cm_find_listen(
2029 cm_id_priv->id.device,
2030 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
2031 if (!listen_cm_id_priv) {
2032 cm_remove_remote(cm_id_priv);
2033 spin_unlock_irq(&cm.lock);
2034 cm_issue_rej(work->port, work->mad_recv_wc,
2035 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
2036 NULL, 0);
2037 return NULL;
2038 }
2039 spin_unlock_irq(&cm.lock);
2040 return listen_cm_id_priv;
2041}
2042
2043/*
2044 * Work-around for inter-subnet connections. If the LIDs are permissive,
2045 * we need to override the LID/SL data in the REQ with the LID information
2046 * in the work completion.
2047 */
2048static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
2049{
2050 if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
2051 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
2052 req_msg)) == IB_LID_PERMISSIVE) {
2053 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
2054 be16_to_cpu(ib_lid_be16(wc->slid)));
2055 IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
2056 }
2057
2058 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
2059 req_msg)) == IB_LID_PERMISSIVE)
2060 IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
2061 wc->dlid_path_bits);
2062 }
2063
2064 if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
2065 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
2066 req_msg)) == IB_LID_PERMISSIVE) {
2067 IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
2068 be16_to_cpu(ib_lid_be16(wc->slid)));
2069 IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
2070 }
2071
2072 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
2073 req_msg)) == IB_LID_PERMISSIVE)
2074 IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
2075 wc->dlid_path_bits);
2076 }
2077}
2078
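/* Handle a received REQ: set up the passive-side cm_id, resolve its
 * address vectors and queue the event for the matching listener. */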
2079static int cm_req_handler(struct cm_work *work)
2080{
2081 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
2082 struct cm_req_msg *req_msg;
2083 const struct ib_global_route *grh;
2084 const struct ib_gid_attr *gid_attr;
2085 int ret;
2086
2087 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
2088
2089 cm_id_priv =
2090 cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
2091 if (IS_ERR(cm_id_priv))
2092 return PTR_ERR(cm_id_priv);
2093
2094 cm_id_priv->id.remote_id =
2095 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2096 cm_id_priv->id.service_id =
2097 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2098 cm_id_priv->tid = req_msg->hdr.tid;
2099 cm_id_priv->timeout_ms = cm_convert_to_ms(
2100 IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2101 cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2102 cm_id_priv->remote_qpn =
2103 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2104 cm_id_priv->initiator_depth =
2105 IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2106 cm_id_priv->responder_resources =
2107 IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2108 cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2109 cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2110 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2111 cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2112 cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2113 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2114
2115 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2116 work->mad_recv_wc->recv_buf.grh,
2117 &cm_id_priv->av);
2118 if (ret)
2119 goto destroy;
2120 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
2121 id.local_id);
2122 if (IS_ERR(cm_id_priv->timewait_info)) {
2123 ret = PTR_ERR(cm_id_priv->timewait_info);
2124 cm_id_priv->timewait_info = NULL;
2125 goto destroy;
2126 }
2127 cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2128 cm_id_priv->timewait_info->remote_ca_guid =
2129 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2130 cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
2131
2132 /*
2133 * Note that the ID pointer is not in the xarray at this point,
2134 * so this set is only visible to the local thread.
2135 */
2136 cm_id_priv->id.state = IB_CM_REQ_RCVD;
2137
2138 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
2139 if (!listen_cm_id_priv) {
2140 trace_icm_no_listener_err(&cm_id_priv->id);
2141 cm_id_priv->id.state = IB_CM_IDLE;
2142 ret = -EINVAL;
2143 goto destroy;
2144 }
2145
2146 memset(&work->path[0], 0, sizeof(work->path[0]));
2147 if (cm_req_has_alt_path(req_msg))
2148 memset(&work->path[1], 0, sizeof(work->path[1]));
2149 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
2150 gid_attr = grh->sgid_attr;
2151
2152 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
2153 work->path[0].rec_type =
2154 sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
2155 } else {
2156 cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
2157 cm_path_set_rec_type(
2158 work->port->cm_dev->ib_device, work->port->port_num,
2159 &work->path[0],
2160 IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
2161 req_msg));
2162 }
2163 if (cm_req_has_alt_path(req_msg))
2164 work->path[1].rec_type = work->path[0].rec_type;
2165 cm_format_paths_from_req(req_msg, &work->path[0],
2166 &work->path[1], work->mad_recv_wc->wc);
2167 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2168 sa_path_set_dmac(&work->path[0],
2169 cm_id_priv->av.ah_attr.roce.dmac);
2170 work->path[0].hop_limit = grh->hop_limit;
2171
2172 /* This destroy call is needed to pair with cm_init_av_for_response */
2173 cm_destroy_av(&cm_id_priv->av);
2174 ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
2175 if (ret) {
2176 int err;
2177
2178 err = rdma_query_gid(work->port->cm_dev->ib_device,
2179 work->port->port_num, 0,
2180 &work->path[0].sgid);
2181 if (err)
2182 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2183 NULL, 0, NULL, 0);
2184 else
2185 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2186 &work->path[0].sgid,
2187 sizeof(work->path[0].sgid),
2188 NULL, 0);
2189 goto rejected;
2190 }
2191 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB)
2192 cm_id_priv->av.dlid_datapath =
2193 IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg);
2194
2195 if (cm_req_has_alt_path(req_msg)) {
2196 ret = cm_init_av_by_path(&work->path[1], NULL,
2197 &cm_id_priv->alt_av);
2198 if (ret) {
2199 ib_send_cm_rej(&cm_id_priv->id,
2200 IB_CM_REJ_INVALID_ALT_GID,
2201 &work->path[0].sgid,
2202 sizeof(work->path[0].sgid), NULL, 0);
2203 goto rejected;
2204 }
2205 }
2206
2207 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
2208 cm_id_priv->id.context = listen_cm_id_priv->id.context;
2209 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2210
2211 /* Now MAD handlers can see the new ID */
2212 spin_lock_irq(&cm_id_priv->lock);
2213 cm_finalize_id(cm_id_priv);
2214
2215 /* Refcount belongs to the event, pairs with cm_process_work() */
2216 refcount_inc(&cm_id_priv->refcount);
2217 cm_queue_work_unlock(cm_id_priv, work);
2218 /*
2219 * Since this ID was just created and was not made visible to other MAD
2220 * handlers until the cm_finalize_id() above we know that the
2221 * cm_process_work() will deliver the event and the listen_cm_id
2222 * embedded in the event can be derefed here.
2223 */
2224 cm_deref_id(listen_cm_id_priv);
2225 return 0;
2226
2227rejected:
2228 cm_deref_id(listen_cm_id_priv);
2229destroy:
2230 ib_destroy_cm_id(&cm_id_priv->id);
2231 return ret;
2232}
2233
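/* Fill in a REP MAD from the parameters given to ib_send_cm_rep(). */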
2234static void cm_format_rep(struct cm_rep_msg *rep_msg,
2235 struct cm_id_private *cm_id_priv,
2236 struct ib_cm_rep_param *param)
2237{
2238 cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
2239 param->ece.attr_mod);
2240 IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2241 be32_to_cpu(cm_id_priv->id.local_id));
2242 IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2243 be32_to_cpu(cm_id_priv->id.remote_id));
2244 IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2245 IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2246 param->responder_resources);
2247 IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2248 cm_id_priv->av.port->cm_dev->ack_delay);
2249 IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2250 IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2251 IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2252 be64_to_cpu(cm_id_priv->id.device->node_guid));
2253
2254 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2255 IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2256 param->initiator_depth);
2257 IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2258 param->flow_control);
2259 IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
2260 IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
2261 } else {
2262 IBA_SET(CM_REP_SRQ, rep_msg, 1);
2263 IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
2264 }
2265
2266 IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
2267 IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
2268 IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
2269
2270 if (param->private_data && param->private_data_len)
2271 IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
2272 param->private_data_len);
2273}
2274
2275int ib_send_cm_rep(struct ib_cm_id *cm_id,
2276 struct ib_cm_rep_param *param)
2277{
2278 struct cm_id_private *cm_id_priv;
2279 struct ib_mad_send_buf *msg;
2280 struct cm_rep_msg *rep_msg;
2281 unsigned long flags;
2282 int ret;
2283
2284 if (param->private_data &&
2285 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2286 return -EINVAL;
2287
2288 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2289 spin_lock_irqsave(&cm_id_priv->lock, flags);
2290 if (cm_id->state != IB_CM_REQ_RCVD &&
2291 cm_id->state != IB_CM_MRA_REQ_SENT) {
2292 trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
2293 ret = -EINVAL;
2294 goto out;
2295 }
2296
2297 msg = cm_alloc_priv_msg(cm_id_priv);
2298 if (IS_ERR(msg)) {
2299 ret = PTR_ERR(msg);
2300 goto out;
2301 }
2302
2303 rep_msg = (struct cm_rep_msg *) msg->mad;
2304 cm_format_rep(rep_msg, cm_id_priv, param);
2305 msg->timeout_ms = cm_id_priv->timeout_ms;
2306 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2307
2308 trace_icm_send_rep(cm_id);
2309 ret = ib_post_send_mad(msg, NULL);
2310 if (ret)
2311 goto out_free;
2312
2313 cm_id->state = IB_CM_REP_SENT;
2314 cm_id_priv->initiator_depth = param->initiator_depth;
2315 cm_id_priv->responder_resources = param->responder_resources;
2316 cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2317 WARN_ONCE(param->qp_num & 0xFF000000,
2318 "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
2319 param->qp_num);
2320 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2321 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2322 return 0;
2323
2324out_free:
2325 cm_free_priv_msg(msg);
2326out:
2327 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2328 return ret;
2329}
2330EXPORT_SYMBOL(ib_send_cm_rep);
2331
2332static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2333 struct cm_id_private *cm_id_priv,
2334 const void *private_data,
2335 u8 private_data_len)
2336{
2337 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2338 IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2339 be32_to_cpu(cm_id_priv->id.local_id));
2340 IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2341 be32_to_cpu(cm_id_priv->id.remote_id));
2342
2343 if (private_data && private_data_len)
2344 IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
2345 private_data_len);
2346}
2347
2348int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2349 const void *private_data,
2350 u8 private_data_len)
2351{
2352 struct cm_id_private *cm_id_priv;
2353 struct ib_mad_send_buf *msg;
2354 unsigned long flags;
2355 void *data;
2356 int ret;
2357
2358 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2359 return -EINVAL;
2360
2361 data = cm_copy_private_data(private_data, private_data_len);
2362 if (IS_ERR(data))
2363 return PTR_ERR(data);
2364
2365 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2366 spin_lock_irqsave(&cm_id_priv->lock, flags);
2367 if (cm_id->state != IB_CM_REP_RCVD &&
2368 cm_id->state != IB_CM_MRA_REP_SENT) {
2369 trace_icm_send_cm_rtu_err(cm_id);
2370 ret = -EINVAL;
2371 goto error;
2372 }
2373
2374 msg = cm_alloc_msg(cm_id_priv);
2375 if (IS_ERR(msg)) {
2376 ret = PTR_ERR(msg);
2377 goto error;
2378 }
2379
2380 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2381 private_data, private_data_len);
2382
2383 trace_icm_send_rtu(cm_id);
2384 ret = ib_post_send_mad(msg, NULL);
2385 if (ret) {
2386 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2387 cm_free_msg(msg);
2388 kfree(data);
2389 return ret;
2390 }
2391
2392 cm_id->state = IB_CM_ESTABLISHED;
2393 cm_set_private_data(cm_id_priv, data, private_data_len);
2394 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2395 return 0;
2396
2397error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2398 kfree(data);
2399 return ret;
2400}
2401EXPORT_SYMBOL(ib_send_cm_rtu);
2402
2403static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2404{
2405 struct cm_rep_msg *rep_msg;
2406 struct ib_cm_rep_event_param *param;
2407
2408 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2409 param = &work->cm_event.param.rep_rcvd;
2410 param->remote_ca_guid =
2411 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2412 param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
2413 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2414 param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
2415 param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2416 param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2417 param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2418 param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2419 param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2420 param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2421 param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2422 param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
2423 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
2424 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
2425 param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
2426
2427 work->cm_event.private_data =
2428 IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
2429}
2430
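/* Re-send the RTU or MRA, as appropriate, in reply to a duplicate REP. */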
2431static void cm_dup_rep_handler(struct cm_work *work)
2432{
2433 struct cm_id_private *cm_id_priv;
2434 struct cm_rep_msg *rep_msg;
2435 struct ib_mad_send_buf *msg = NULL;
2436 int ret;
2437
2438 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2439 cm_id_priv = cm_acquire_id(
2440 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
2441 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
2442 if (!cm_id_priv)
2443 return;
2444
2445 atomic_long_inc(
2446 &work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
2447 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2448 if (ret)
2449 goto deref;
2450
2451 spin_lock_irq(&cm_id_priv->lock);
2452 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2453 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2454 cm_id_priv->private_data,
2455 cm_id_priv->private_data_len);
2456 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2457 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2458 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2459 cm_id_priv->private_data,
2460 cm_id_priv->private_data_len);
2461 else
2462 goto unlock;
2463 spin_unlock_irq(&cm_id_priv->lock);
2464
2465 trace_icm_send_dup_rep(&cm_id_priv->id);
2466 ret = ib_post_send_mad(msg, NULL);
2467 if (ret)
2468 goto free;
2469 goto deref;
2470
2471unlock: spin_unlock_irq(&cm_id_priv->lock);
2472free: cm_free_response_msg(msg);
2473deref: cm_deref_id(cm_id_priv);
2474}
2475
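/* Handle a received REP on the active side: record the remote connection
 * parameters and queue the event, rejecting stale or duplicate REPs. */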
2476static int cm_rep_handler(struct cm_work *work)
2477{
2478 struct cm_id_private *cm_id_priv;
2479 struct cm_rep_msg *rep_msg;
2480 int ret;
2481 struct cm_id_private *cur_cm_id_priv;
2482 struct cm_timewait_info *timewait_info;
2483
2484 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2485 cm_id_priv = cm_acquire_id(
2486 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
2487 if (!cm_id_priv) {
2488 cm_dup_rep_handler(work);
2489 trace_icm_remote_no_priv_err(
2490 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2491 return -EINVAL;
2492 }
2493
2494 cm_format_rep_event(work, cm_id_priv->qp_type);
2495
2496 spin_lock_irq(&cm_id_priv->lock);
2497 switch (cm_id_priv->id.state) {
2498 case IB_CM_REQ_SENT:
2499 case IB_CM_MRA_REQ_RCVD:
2500 break;
2501 default:
2502 ret = -EINVAL;
2503 trace_icm_rep_unknown_err(
2504 IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2505 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
2506 cm_id_priv->id.state);
2507 spin_unlock_irq(&cm_id_priv->lock);
2508 goto error;
2509 }
2510
2511 cm_id_priv->timewait_info->work.remote_id =
2512 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2513 cm_id_priv->timewait_info->remote_ca_guid =
2514 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2515 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2516
2517 spin_lock(&cm.lock);
2518 /* Check for duplicate REP. */
2519 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2520 spin_unlock(&cm.lock);
2521 spin_unlock_irq(&cm_id_priv->lock);
2522 ret = -EINVAL;
2523 trace_icm_insert_failed_err(
2524 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2525 goto error;
2526 }
2527 /* Check for a stale connection. */
2528 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2529 if (timewait_info) {
2530 cm_remove_remote(cm_id_priv);
2531 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2532 timewait_info->work.remote_id);
2533
2534 spin_unlock(&cm.lock);
2535 spin_unlock_irq(&cm_id_priv->lock);
2536 cm_issue_rej(work->port, work->mad_recv_wc,
2537 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2538 NULL, 0);
2539 ret = -EINVAL;
2540 trace_icm_staleconn_err(
2541 IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2542 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2543
2544 if (cur_cm_id_priv) {
2545 ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2546 cm_deref_id(cur_cm_id_priv);
2547 }
2548
2549 goto error;
2550 }
2551 spin_unlock(&cm.lock);
2552
2553 cm_id_priv->id.state = IB_CM_REP_RCVD;
2554 cm_id_priv->id.remote_id =
2555 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2556 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2557 cm_id_priv->initiator_depth =
2558 IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2559 cm_id_priv->responder_resources =
2560 IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2561 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2562 cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2563 cm_id_priv->target_ack_delay =
2564 IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2565 cm_id_priv->av.timeout =
2566 cm_ack_timeout(cm_id_priv->target_ack_delay,
2567 cm_id_priv->av.timeout - 1);
2568 cm_id_priv->alt_av.timeout =
2569 cm_ack_timeout(cm_id_priv->target_ack_delay,
2570 cm_id_priv->alt_av.timeout - 1);
2571
2572 ib_cancel_mad(cm_id_priv->msg);
2573 cm_queue_work_unlock(cm_id_priv, work);
2574 return 0;
2575
2576error:
2577 cm_deref_id(cm_id_priv);
2578 return ret;
2579}
2580
2581static int cm_establish_handler(struct cm_work *work)
2582{
2583 struct cm_id_private *cm_id_priv;
2584
2585 /* See comment in cm_establish about lookup. */
2586 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2587 if (!cm_id_priv)
2588 return -EINVAL;
2589
2590 spin_lock_irq(&cm_id_priv->lock);
2591 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2592 spin_unlock_irq(&cm_id_priv->lock);
2593 goto out;
2594 }
2595
2596 ib_cancel_mad(cm_id_priv->msg);
2597 cm_queue_work_unlock(cm_id_priv, work);
2598 return 0;
2599out:
2600 cm_deref_id(cm_id_priv);
2601 return -EINVAL;
2602}
2603
2604static int cm_rtu_handler(struct cm_work *work)
2605{
2606 struct cm_id_private *cm_id_priv;
2607 struct cm_rtu_msg *rtu_msg;
2608
2609 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2610 cm_id_priv = cm_acquire_id(
2611 cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
2612 cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
2613 if (!cm_id_priv)
2614 return -EINVAL;
2615
2616 work->cm_event.private_data =
2617 IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
2618
2619 spin_lock_irq(&cm_id_priv->lock);
2620 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2621 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2622 spin_unlock_irq(&cm_id_priv->lock);
2623 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2624 [CM_RTU_COUNTER]);
2625 goto out;
2626 }
2627 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2628
2629 ib_cancel_mad(cm_id_priv->msg);
2630 cm_queue_work_unlock(cm_id_priv, work);
2631 return 0;
2632out:
2633 cm_deref_id(cm_id_priv);
2634 return -EINVAL;
2635}
2636
2637static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2638 struct cm_id_private *cm_id_priv,
2639 const void *private_data,
2640 u8 private_data_len)
2641{
2642 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2643 cm_form_tid(cm_id_priv));
2644 IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2645 be32_to_cpu(cm_id_priv->id.local_id));
2646 IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2647 be32_to_cpu(cm_id_priv->id.remote_id));
2648 IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2649 be32_to_cpu(cm_id_priv->remote_qpn));
2650
2651 if (private_data && private_data_len)
2652 IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
2653 private_data_len);
2654}
2655
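/* Send a DREQ to start disconnecting an established connection.  On
 * failure the cm_id is moved directly to timewait. */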
2656static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
2657 const void *private_data, u8 private_data_len)
2658{
2659 struct ib_mad_send_buf *msg;
2660 int ret;
2661
2662 lockdep_assert_held(&cm_id_priv->lock);
2663
2664 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2665 return -EINVAL;
2666
2667 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2668 trace_icm_dreq_skipped(&cm_id_priv->id);
2669 return -EINVAL;
2670 }
2671
2672 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2673 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2674 ib_cancel_mad(cm_id_priv->msg);
2675
2676 msg = cm_alloc_priv_msg(cm_id_priv);
2677 if (IS_ERR(msg)) {
2678 cm_enter_timewait(cm_id_priv);
2679 return PTR_ERR(msg);
2680 }
2681
2682 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2683 private_data, private_data_len);
2684 msg->timeout_ms = cm_id_priv->timeout_ms;
2685 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2686
2687 trace_icm_send_dreq(&cm_id_priv->id);
2688 ret = ib_post_send_mad(msg, NULL);
2689 if (ret) {
2690 cm_enter_timewait(cm_id_priv);
2691 cm_free_priv_msg(msg);
2692 return ret;
2693 }
2694
2695 cm_id_priv->id.state = IB_CM_DREQ_SENT;
2696 return 0;
2697}
2698
2699int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
2700 u8 private_data_len)
2701{
2702 struct cm_id_private *cm_id_priv =
2703 container_of(cm_id, struct cm_id_private, id);
2704 unsigned long flags;
2705 int ret;
2706
2707 spin_lock_irqsave(&cm_id_priv->lock, flags);
2708 ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
2709 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2710 return ret;
2711}
2712EXPORT_SYMBOL(ib_send_cm_dreq);
2713
2714static void cm_format_drep(struct cm_drep_msg *drep_msg,
2715 struct cm_id_private *cm_id_priv,
2716 const void *private_data,
2717 u8 private_data_len)
2718{
2719 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2720 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2721 be32_to_cpu(cm_id_priv->id.local_id));
2722 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2723 be32_to_cpu(cm_id_priv->id.remote_id));
2724
2725 if (private_data && private_data_len)
2726 IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
2727 private_data_len);
2728}
2729
2730static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
2731 void *private_data, u8 private_data_len)
2732{
2733 struct ib_mad_send_buf *msg;
2734 int ret;
2735
2736 lockdep_assert_held(&cm_id_priv->lock);
2737
2738 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2739 return -EINVAL;
2740
2741 if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2742 trace_icm_send_drep_err(&cm_id_priv->id);
2743 kfree(private_data);
2744 return -EINVAL;
2745 }
2746
2747 cm_set_private_data(cm_id_priv, private_data, private_data_len);
2748 cm_enter_timewait(cm_id_priv);
2749
2750 msg = cm_alloc_msg(cm_id_priv);
2751 if (IS_ERR(msg))
2752 return PTR_ERR(msg);
2753
2754 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2755 private_data, private_data_len);
2756
2757 trace_icm_send_drep(&cm_id_priv->id);
2758 ret = ib_post_send_mad(msg, NULL);
2759 if (ret) {
2760 cm_free_msg(msg);
2761 return ret;
2762 }
2763 return 0;
2764}
2765
2766int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
2767 u8 private_data_len)
2768{
2769 struct cm_id_private *cm_id_priv =
2770 container_of(cm_id, struct cm_id_private, id);
2771 unsigned long flags;
2772 void *data;
2773 int ret;
2774
2775 data = cm_copy_private_data(private_data, private_data_len);
2776 if (IS_ERR(data))
2777 return PTR_ERR(data);
2778
2779 spin_lock_irqsave(&cm_id_priv->lock, flags);
2780 ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
2781 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2782 return ret;
2783}
2784EXPORT_SYMBOL(ib_send_cm_drep);
2785
2786static int cm_issue_drep(struct cm_port *port,
2787 struct ib_mad_recv_wc *mad_recv_wc)
2788{
2789 struct ib_mad_send_buf *msg = NULL;
2790 struct cm_dreq_msg *dreq_msg;
2791 struct cm_drep_msg *drep_msg;
2792 int ret;
2793
2794 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2795 if (ret)
2796 return ret;
2797
2798 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2799 drep_msg = (struct cm_drep_msg *) msg->mad;
2800
2801 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2802 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2803 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
2804 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2805 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2806
2807 trace_icm_issue_drep(
2808 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2809 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2810 ret = ib_post_send_mad(msg, NULL);
2811 if (ret)
2812 cm_free_response_msg(msg);
2813
2814 return ret;
2815}
2816
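/* Handle a received DREQ; duplicates in timewait are answered with a
 * DREP, otherwise the DREQ event is queued for the consumer. */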
2817static int cm_dreq_handler(struct cm_work *work)
2818{
2819 struct cm_id_private *cm_id_priv;
2820 struct cm_dreq_msg *dreq_msg;
2821 struct ib_mad_send_buf *msg = NULL;
2822
2823 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2824 cm_id_priv = cm_acquire_id(
2825 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
2826 cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
2827 if (!cm_id_priv) {
2828 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2829 [CM_DREQ_COUNTER]);
2830 cm_issue_drep(work->port, work->mad_recv_wc);
2831 trace_icm_no_priv_err(
2832 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2833 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2834 return -EINVAL;
2835 }
2836
2837 work->cm_event.private_data =
2838 IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
2839
2840 spin_lock_irq(&cm_id_priv->lock);
2841 if (cm_id_priv->local_qpn !=
2842 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
2843 goto unlock;
2844
2845 switch (cm_id_priv->id.state) {
2846 case IB_CM_REP_SENT:
2847 case IB_CM_DREQ_SENT:
2848 case IB_CM_MRA_REP_RCVD:
2849 ib_cancel_mad(cm_id_priv->msg);
2850 break;
2851 case IB_CM_ESTABLISHED:
2852 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2853 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2854 ib_cancel_mad(cm_id_priv->msg);
2855 break;
2856 case IB_CM_TIMEWAIT:
2857 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2858 [CM_DREQ_COUNTER]);
2859 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2860 if (IS_ERR(msg))
2861 goto unlock;
2862
2863 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2864 cm_id_priv->private_data,
2865 cm_id_priv->private_data_len);
2866 spin_unlock_irq(&cm_id_priv->lock);
2867
2868 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2869 ib_post_send_mad(msg, NULL))
2870 cm_free_response_msg(msg);
2871 goto deref;
2872 case IB_CM_DREQ_RCVD:
2873 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2874 [CM_DREQ_COUNTER]);
2875 goto unlock;
2876 default:
2877 trace_icm_dreq_unknown_err(&cm_id_priv->id);
2878 goto unlock;
2879 }
2880 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2881 cm_id_priv->tid = dreq_msg->hdr.tid;
2882 cm_queue_work_unlock(cm_id_priv, work);
2883 return 0;
2884
2885unlock: spin_unlock_irq(&cm_id_priv->lock);
2886deref: cm_deref_id(cm_id_priv);
2887 return -EINVAL;
2888}
2889
2890static int cm_drep_handler(struct cm_work *work)
2891{
2892 struct cm_id_private *cm_id_priv;
2893 struct cm_drep_msg *drep_msg;
2894
2895 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2896 cm_id_priv = cm_acquire_id(
2897 cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
2898 cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
2899 if (!cm_id_priv)
2900 return -EINVAL;
2901
2902 work->cm_event.private_data =
2903 IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
2904
2905 spin_lock_irq(&cm_id_priv->lock);
2906 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2907 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2908 spin_unlock_irq(&cm_id_priv->lock);
2909 goto out;
2910 }
2911 cm_enter_timewait(cm_id_priv);
2912
2913 ib_cancel_mad(cm_id_priv->msg);
2914 cm_queue_work_unlock(cm_id_priv, work);
2915 return 0;
2916out:
2917 cm_deref_id(cm_id_priv);
2918 return -EINVAL;
2919}
2920
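/*
 * Send a REJ appropriate to the current state: pre-REP states reset the
 * cm_id to idle, while REP-sent states move it to timewait first.
 */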
2921static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
2922 enum ib_cm_rej_reason reason, void *ari,
2923 u8 ari_length, const void *private_data,
2924 u8 private_data_len)
2925{
2926 enum ib_cm_state state = cm_id_priv->id.state;
2927 struct ib_mad_send_buf *msg;
2928 int ret;
2929
2930 lockdep_assert_held(&cm_id_priv->lock);
2931
2932 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2933 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2934 return -EINVAL;
2935
2936 trace_icm_send_rej(&cm_id_priv->id, reason);
2937
2938 switch (state) {
2939 case IB_CM_REQ_SENT:
2940 case IB_CM_MRA_REQ_RCVD:
2941 case IB_CM_REQ_RCVD:
2942 case IB_CM_MRA_REQ_SENT:
2943 case IB_CM_REP_RCVD:
2944 case IB_CM_MRA_REP_SENT:
2945 cm_reset_to_idle(cm_id_priv);
2946 msg = cm_alloc_msg(cm_id_priv);
2947 if (IS_ERR(msg))
2948 return PTR_ERR(msg);
2949 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2950 ari, ari_length, private_data, private_data_len,
2951 state);
2952 break;
2953 case IB_CM_REP_SENT:
2954 case IB_CM_MRA_REP_RCVD:
2955 cm_enter_timewait(cm_id_priv);
2956 msg = cm_alloc_msg(cm_id_priv);
2957 if (IS_ERR(msg))
2958 return PTR_ERR(msg);
2959 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2960 ari, ari_length, private_data, private_data_len,
2961 state);
2962 break;
2963 default:
2964 trace_icm_send_unknown_rej_err(&cm_id_priv->id);
2965 return -EINVAL;
2966 }
2967
2968 ret = ib_post_send_mad(msg, NULL);
2969 if (ret) {
2970 cm_free_msg(msg);
2971 return ret;
2972 }
2973
2974 return 0;
2975}
2976
2977int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
2978 void *ari, u8 ari_length, const void *private_data,
2979 u8 private_data_len)
2980{
2981 struct cm_id_private *cm_id_priv =
2982 container_of(cm_id, struct cm_id_private, id);
2983 unsigned long flags;
2984 int ret;
2985
2986 spin_lock_irqsave(&cm_id_priv->lock, flags);
2987 ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
2988 private_data, private_data_len);
2989 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2990 return ret;
2991}
2992EXPORT_SYMBOL(ib_send_cm_rej);
2993
2994static void cm_format_rej_event(struct cm_work *work)
2995{
2996 struct cm_rej_msg *rej_msg;
2997 struct ib_cm_rej_event_param *param;
2998
2999 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
3000 param = &work->cm_event.param.rej_rcvd;
3001 param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
3002 param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
3003 param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
3004 work->cm_event.private_data =
3005 IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
3006}
3007
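/* Locate the cm_id targeted by a received REJ; the lookup depends on the
 * reject reason and on which message was rejected. */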
3008static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
3009{
3010 struct cm_id_private *cm_id_priv;
3011 __be32 remote_id;
3012
3013 remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
3014
3015 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
3016 cm_id_priv = cm_find_remote_id(
3017 *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
3018 remote_id);
3019 } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
3020 CM_MSG_RESPONSE_REQ)
3021 cm_id_priv = cm_acquire_id(
3022 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3023 0);
3024 else
3025 cm_id_priv = cm_acquire_id(
3026 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3027 remote_id);
3028
3029 return cm_id_priv;
3030}
3031
3032static int cm_rej_handler(struct cm_work *work)
3033{
3034 struct cm_id_private *cm_id_priv;
3035 struct cm_rej_msg *rej_msg;
3036
3037 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
3038 cm_id_priv = cm_acquire_rejected_id(rej_msg);
3039 if (!cm_id_priv)
3040 return -EINVAL;
3041
3042 cm_format_rej_event(work);
3043
3044 spin_lock_irq(&cm_id_priv->lock);
3045 switch (cm_id_priv->id.state) {
3046 case IB_CM_REQ_SENT:
3047 case IB_CM_MRA_REQ_RCVD:
3048 case IB_CM_REP_SENT:
3049 case IB_CM_MRA_REP_RCVD:
3050 ib_cancel_mad(cm_id_priv->msg);
3051 fallthrough;
3052 case IB_CM_REQ_RCVD:
3053 case IB_CM_MRA_REQ_SENT:
3054 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
3055 cm_enter_timewait(cm_id_priv);
3056 else
3057 cm_reset_to_idle(cm_id_priv);
3058 break;
3059 case IB_CM_DREQ_SENT:
3060 ib_cancel_mad(cm_id_priv->msg);
3061 fallthrough;
3062 case IB_CM_REP_RCVD:
3063 case IB_CM_MRA_REP_SENT:
3064 cm_enter_timewait(cm_id_priv);
3065 break;
3066 case IB_CM_ESTABLISHED:
3067 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
3068 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
3069 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
3070 ib_cancel_mad(cm_id_priv->msg);
3071 cm_enter_timewait(cm_id_priv);
3072 break;
3073 }
3074 fallthrough;
3075 default:
3076 trace_icm_rej_unknown_err(&cm_id_priv->id);
3077 spin_unlock_irq(&cm_id_priv->lock);
3078 goto out;
3079 }
3080
3081 cm_queue_work_unlock(cm_id_priv, work);
3082 return 0;
3083out:
3084 cm_deref_id(cm_id_priv);
3085 return -EINVAL;
3086}
3087
3088int ib_send_cm_mra(struct ib_cm_id *cm_id,
3089 u8 service_timeout,
3090 const void *private_data,
3091 u8 private_data_len)
3092{
3093 struct cm_id_private *cm_id_priv;
3094 struct ib_mad_send_buf *msg;
3095 enum ib_cm_state cm_state;
3096 enum ib_cm_lap_state lap_state;
3097 enum cm_msg_response msg_response;
3098 void *data;
3099 unsigned long flags;
3100 int ret;
3101
3102 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
3103 return -EINVAL;
3104
3105 data = cm_copy_private_data(private_data, private_data_len);
3106 if (IS_ERR(data))
3107 return PTR_ERR(data);
3108
3109 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3110
3111 spin_lock_irqsave(&cm_id_priv->lock, flags);
3112 switch (cm_id_priv->id.state) {
3113 case IB_CM_REQ_RCVD:
3114 cm_state = IB_CM_MRA_REQ_SENT;
3115 lap_state = cm_id->lap_state;
3116 msg_response = CM_MSG_RESPONSE_REQ;
3117 break;
3118 case IB_CM_REP_RCVD:
3119 cm_state = IB_CM_MRA_REP_SENT;
3120 lap_state = cm_id->lap_state;
3121 msg_response = CM_MSG_RESPONSE_REP;
3122 break;
3123 case IB_CM_ESTABLISHED:
3124 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
3125 cm_state = cm_id->state;
3126 lap_state = IB_CM_MRA_LAP_SENT;
3127 msg_response = CM_MSG_RESPONSE_OTHER;
3128 break;
3129 }
3130 fallthrough;
3131 default:
3132 trace_icm_send_mra_unknown_err(&cm_id_priv->id);
3133 ret = -EINVAL;
3134 goto error_unlock;
3135 }
3136
3137 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
3138 msg = cm_alloc_msg(cm_id_priv);
3139 if (IS_ERR(msg)) {
3140 ret = PTR_ERR(msg);
3141 goto error_unlock;
3142 }
3143
3144 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3145 msg_response, service_timeout,
3146 private_data, private_data_len);
3147 trace_icm_send_mra(cm_id);
3148 ret = ib_post_send_mad(msg, NULL);
3149 if (ret)
3150 goto error_free_msg;
3151 }
3152
3153 cm_id->state = cm_state;
3154 cm_id->lap_state = lap_state;
3155 cm_id_priv->service_timeout = service_timeout;
3156 cm_set_private_data(cm_id_priv, data, private_data_len);
3157 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3158 return 0;
3159
3160error_free_msg:
3161 cm_free_msg(msg);
3162error_unlock:
3163 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3164 kfree(data);
3165 return ret;
3166}
3167EXPORT_SYMBOL(ib_send_cm_mra);
3168
3169static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
3170{
3171 switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
3172 case CM_MSG_RESPONSE_REQ:
3173 return cm_acquire_id(
3174 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3175 0);
3176 case CM_MSG_RESPONSE_REP:
3177 case CM_MSG_RESPONSE_OTHER:
3178 return cm_acquire_id(
3179 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3180 cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
3181 default:
3182 return NULL;
3183 }
3184}
3185
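/* Handle a received MRA: extend the timeout of the outstanding REQ, REP
 * or LAP by the service timeout requested by the peer. */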
3186static int cm_mra_handler(struct cm_work *work)
3187{
3188 struct cm_id_private *cm_id_priv;
3189 struct cm_mra_msg *mra_msg;
3190 int timeout;
3191
3192 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3193 cm_id_priv = cm_acquire_mraed_id(mra_msg);
3194 if (!cm_id_priv)
3195 return -EINVAL;
3196
3197 work->cm_event.private_data =
3198 IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
3199 work->cm_event.param.mra_rcvd.service_timeout =
3200 IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
3201 timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
3202 cm_convert_to_ms(cm_id_priv->av.timeout);
3203
3204 spin_lock_irq(&cm_id_priv->lock);
3205 switch (cm_id_priv->id.state) {
3206 case IB_CM_REQ_SENT:
3207 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3208 CM_MSG_RESPONSE_REQ ||
3209 ib_modify_mad(cm_id_priv->msg, timeout))
3210 goto out;
3211 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3212 break;
3213 case IB_CM_REP_SENT:
3214 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3215 CM_MSG_RESPONSE_REP ||
3216 ib_modify_mad(cm_id_priv->msg, timeout))
3217 goto out;
3218 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3219 break;
3220 case IB_CM_ESTABLISHED:
3221 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3222 CM_MSG_RESPONSE_OTHER ||
3223 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3224 ib_modify_mad(cm_id_priv->msg, timeout)) {
3225 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3226 atomic_long_inc(
3227 &work->port->counters[CM_RECV_DUPLICATES]
3228 [CM_MRA_COUNTER]);
3229 goto out;
3230 }
3231 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3232 break;
3233 case IB_CM_MRA_REQ_RCVD:
3234 case IB_CM_MRA_REP_RCVD:
3235 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3236 [CM_MRA_COUNTER]);
3237 fallthrough;
3238 default:
3239 trace_icm_mra_unknown_err(&cm_id_priv->id);
3240 goto out;
3241 }
3242
3243 cm_id_priv->msg->context[1] = (void *) (unsigned long)
3244 cm_id_priv->id.state;
3245 cm_queue_work_unlock(cm_id_priv, work);
3246 return 0;
3247out:
3248 spin_unlock_irq(&cm_id_priv->lock);
3249 cm_deref_id(cm_id_priv);
3250 return -EINVAL;
3251}
3252
3253static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3254 struct sa_path_rec *path)
3255{
3256 u32 lid;
3257
3258 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3259 sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
3260 lap_msg));
3261 sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
3262 lap_msg));
3263 } else {
3264 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3265 CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
3266 sa_path_set_dlid(path, lid);
3267
3268 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3269 CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
3270 sa_path_set_slid(path, lid);
3271 }
3272}
3273
3274static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3275 struct sa_path_rec *path,
3276 struct cm_lap_msg *lap_msg)
3277{
3278 path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
3279 path->sgid =
3280 *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
3281 path->flow_label =
3282 cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3283 path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3284 path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
3285 path->reversible = 1;
3286 path->pkey = cm_id_priv->pkey;
3287 path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
3288 path->mtu_selector = IB_SA_EQ;
3289 path->mtu = cm_id_priv->path_mtu;
3290 path->rate_selector = IB_SA_EQ;
3291 path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
3292 path->packet_life_time_selector = IB_SA_EQ;
3293 path->packet_life_time =
3294 IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
3295 path->packet_life_time -= (path->packet_life_time > 0);
3296 cm_format_path_lid_from_lap(lap_msg, path);
3297}
3298
3299static int cm_lap_handler(struct cm_work *work)
3300{
3301 struct cm_id_private *cm_id_priv;
3302 struct cm_lap_msg *lap_msg;
3303 struct ib_cm_lap_event_param *param;
3304 struct ib_mad_send_buf *msg = NULL;
3305 struct rdma_ah_attr ah_attr;
3306 struct cm_av alt_av = {};
3307 int ret;
3308
3309 /* Alternate path messages are currently not supported for the
3310 * RoCE link layer.
3311 */
3312 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3313 work->port->port_num))
3314 return -EINVAL;
3315
3316 /* todo: verify LAP request and send reject APR if invalid. */
3317 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3318 cm_id_priv = cm_acquire_id(
3319 cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
3320 cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
3321 if (!cm_id_priv)
3322 return -EINVAL;
3323
3324 param = &work->cm_event.param.lap_rcvd;
3325 memset(&work->path[0], 0, sizeof(work->path[1]));
3326 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3327 work->port->port_num, &work->path[0],
3328 IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
3329 lap_msg));
3330 param->alternate_path = &work->path[0];
3331 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3332 work->cm_event.private_data =
3333 IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
3334
3335 ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
3336 work->port->port_num,
3337 work->mad_recv_wc->wc,
3338 work->mad_recv_wc->recv_buf.grh,
3339 &ah_attr);
3340 if (ret)
3341 goto deref;
3342
3343 ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
3344 if (ret) {
3345 rdma_destroy_ah_attr(&ah_attr);
3346 goto deref;
3347 }
3348
3349 spin_lock_irq(&cm_id_priv->lock);
3350 cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3351 &ah_attr, &cm_id_priv->av);
3352 cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
3353
3354 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3355 goto unlock;
3356
3357 switch (cm_id_priv->id.lap_state) {
3358 case IB_CM_LAP_UNINIT:
3359 case IB_CM_LAP_IDLE:
3360 break;
3361 case IB_CM_MRA_LAP_SENT:
3362 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3363 [CM_LAP_COUNTER]);
3364 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3365 if (IS_ERR(msg))
3366 goto unlock;
3367
3368 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3369 CM_MSG_RESPONSE_OTHER,
3370 cm_id_priv->service_timeout,
3371 cm_id_priv->private_data,
3372 cm_id_priv->private_data_len);
3373 spin_unlock_irq(&cm_id_priv->lock);
3374
3375 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3376 ib_post_send_mad(msg, NULL))
3377 cm_free_response_msg(msg);
3378 goto deref;
3379 case IB_CM_LAP_RCVD:
3380 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3381 [CM_LAP_COUNTER]);
3382 goto unlock;
3383 default:
3384 goto unlock;
3385 }
3386
3387 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3388 cm_id_priv->tid = lap_msg->hdr.tid;
3389 cm_queue_work_unlock(cm_id_priv, work);
3390 return 0;
3391
unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
3394 return -EINVAL;
3395}
3396
3397static int cm_apr_handler(struct cm_work *work)
3398{
3399 struct cm_id_private *cm_id_priv;
3400 struct cm_apr_msg *apr_msg;
3401
	/* Alternate path messages are currently not supported for the
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;
3408
3409 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3410 cm_id_priv = cm_acquire_id(
3411 cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
3412 cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
3413 if (!cm_id_priv)
3414 return -EINVAL; /* Unmatched reply. */
3415
3416 work->cm_event.param.apr_rcvd.ap_status =
3417 IBA_GET(CM_APR_AR_STATUS, apr_msg);
3418 work->cm_event.param.apr_rcvd.apr_info =
3419 IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
3420 work->cm_event.param.apr_rcvd.info_len =
3421 IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
3422 work->cm_event.private_data =
3423 IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
3424
	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->msg);
3434 cm_queue_work_unlock(cm_id_priv, work);
3435 return 0;
3436out:
3437 cm_deref_id(cm_id_priv);
3438 return -EINVAL;
3439}
3440
3441static int cm_timewait_handler(struct cm_work *work)
3442{
3443 struct cm_timewait_info *timewait_info;
3444 struct cm_id_private *cm_id_priv;
3445
3446 timewait_info = container_of(work, struct cm_timewait_info, work);
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
3460 goto out;
3461 }
3462 cm_id_priv->id.state = IB_CM_IDLE;
3463 cm_queue_work_unlock(cm_id_priv, work);
3464 return 0;
3465out:
3466 cm_deref_id(cm_id_priv);
3467 return -EINVAL;
3468}
3469
3470static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3471 struct cm_id_private *cm_id_priv,
3472 struct ib_cm_sidr_req_param *param)
3473{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
3476 IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3477 be32_to_cpu(cm_id_priv->id.local_id));
3478 IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3479 be16_to_cpu(param->path->pkey));
3480 IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3481 be64_to_cpu(param->service_id));
3482
3483 if (param->private_data && param->private_data_len)
3484 IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3485 param->private_data, param->private_data_len);
3486}
3487
3488int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3489 struct ib_cm_sidr_req_param *param)
3490{
3491 struct cm_id_private *cm_id_priv;
3492 struct ib_mad_send_buf *msg;
3493 struct cm_av av = {};
3494 unsigned long flags;
3495 int ret;
3496
3497 if (!param->path || (param->private_data &&
3498 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3499 return -EINVAL;
3500
3501 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
3503 if (ret)
3504 return ret;
3505
3506 spin_lock_irqsave(&cm_id_priv->lock, flags);
	cm_move_av_from_path(&cm_id_priv->av, &av);
3508 cm_id->service_id = param->service_id;
3509 cm_id_priv->timeout_ms = param->timeout_ms;
3510 cm_id_priv->max_cm_retries = param->max_cm_retries;
3511 if (cm_id->state != IB_CM_IDLE) {
3512 ret = -EINVAL;
3513 goto out_unlock;
3514 }
3515
3516 msg = cm_alloc_priv_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
3519 goto out_unlock;
3520 }
3521
	cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
			   param);
3524 msg->timeout_ms = cm_id_priv->timeout_ms;
3525 msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT;
3526
	trace_icm_send_sidr_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
3529 if (ret)
3530 goto out_free;
3531 cm_id->state = IB_CM_SIDR_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3533 return 0;
3534out_free:
3535 cm_free_priv_msg(msg);
3536out_unlock:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3538 return ret;
3539}
3540EXPORT_SYMBOL(ib_send_cm_sidr_req);
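
/*
 * Usage sketch (illustrative only, not part of this file): a ULP that has
 * already resolved a path record might issue a SIDR REQ roughly as follows.
 * "my_path", "my_handler", "MY_SERVICE_ID" and "ctx" are hypothetical.
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &my_path,
 *		.service_id	= cpu_to_be64(MY_SERVICE_ID),
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *	struct ib_cm_id *id = ib_create_cm_id(device, my_handler, ctx);
 *
 *	if (!IS_ERR(id))
 *		ret = ib_send_cm_sidr_req(id, &param);
 *
 * The reply (or a timeout) is then reported through the cm_handler as an
 * IB_CM_SIDR_REP_RECEIVED or IB_CM_SIDR_REQ_ERROR event.
 */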
3541
3542static void cm_format_sidr_req_event(struct cm_work *work,
3543 const struct cm_id_private *rx_cm_id,
3544 struct ib_cm_id *listen_id)
3545{
3546 struct cm_sidr_req_msg *sidr_req_msg;
3547 struct ib_cm_sidr_req_event_param *param;
3548
3549 sidr_req_msg = (struct cm_sidr_req_msg *)
3550 work->mad_recv_wc->recv_buf.mad;
3551 param = &work->cm_event.param.sidr_req_rcvd;
3552 param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
3553 param->listen_id = listen_id;
3554 param->service_id =
3555 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3556 param->bth_pkey = cm_get_bth_pkey(work);
3557 param->port = work->port->port_num;
3558 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3559 work->cm_event.private_data =
3560 IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
3561}
3562
3563static int cm_sidr_req_handler(struct cm_work *work)
3564{
3565 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
3566 struct cm_sidr_req_msg *sidr_req_msg;
3567 struct ib_wc *wc;
3568 int ret;
3569
	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);
3574
3575 /* Record SGID/SLID and request ID for lookup. */
3576 sidr_req_msg = (struct cm_sidr_req_msg *)
3577 work->mad_recv_wc->recv_buf.mad;
3578
3579 cm_id_priv->id.remote_id =
3580 cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
3581 cm_id_priv->id.service_id =
3582 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3583 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3584
3585 wc = work->mad_recv_wc->wc;
3586 cm_id_priv->sidr_slid = wc->slid;
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
3590 if (ret)
3591 goto out;
3592
	spin_lock_irq(&cm.lock);
	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						      [CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   cm_id_priv->id.service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		ib_send_cm_sidr_rep(&cm_id_priv->id,
				    &(struct ib_cm_sidr_rep_param){
					    .status = IB_SIDR_UNSUPPORTED });
		goto out; /* No match. */
	}
	spin_unlock_irq(&cm.lock);
3612
3613 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3614 cm_id_priv->id.context = listen_cm_id_priv->id.context;
3615
3616 /*
3617 * A SIDR ID does not need to be in the xarray since it does not receive
3618 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
3619 * not enter timewait.
3620 */
3621
	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
3623 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
3624 cm_free_work(work);
3625 /*
3626 * A pointer to the listen_cm_id is held in the event, so this deref
3627 * must be after the event is delivered above.
3628 */
	cm_deref_id(listen_cm_id_priv);
3630 if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
3632 return 0;
3633out:
3634 ib_destroy_cm_id(&cm_id_priv->id);
3635 return -EINVAL;
3636}
3637
3638static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3639 struct cm_id_private *cm_id_priv,
3640 struct ib_cm_sidr_rep_param *param)
3641{
	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			      cm_id_priv->tid, param->ece.attr_mod);
3644 IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
3645 be32_to_cpu(cm_id_priv->id.remote_id));
3646 IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
3647 IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
3648 IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
3649 be64_to_cpu(cm_id_priv->id.service_id));
3650 IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
3651 IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
3652 param->ece.vendor_id & 0xFF);
3653 IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
3654 (param->ece.vendor_id >> 8) & 0xFF);
3655
3656 if (param->info && param->info_length)
3657 IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
3658 param->info, param->info_length);
3659
3660 if (param->private_data && param->private_data_len)
3661 IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
3662 param->private_data, param->private_data_len);
3663}
3664
3665static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
3666 struct ib_cm_sidr_rep_param *param)
3667{
3668 struct ib_mad_send_buf *msg;
3669 unsigned long flags;
3670 int ret;
3671
3672 lockdep_assert_held(&cm_id_priv->lock);
3673
3674 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3675 (param->private_data &&
3676 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3677 return -EINVAL;
3678
3679 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
3680 return -EINVAL;
3681
3682 msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	trace_icm_send_sidr_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
3690 if (ret) {
3691 cm_free_msg(msg);
3692 return ret;
3693 }
3694 cm_id_priv->id.state = IB_CM_IDLE;
3695 spin_lock_irqsave(&cm.lock, flags);
3696 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3697 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3698 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3699 }
	spin_unlock_irqrestore(&cm.lock, flags);
3701 return 0;
3702}
3703
3704int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3705 struct ib_cm_sidr_rep_param *param)
3706{
3707 struct cm_id_private *cm_id_priv =
3708 container_of(cm_id, struct cm_id_private, id);
3709 unsigned long flags;
3710 int ret;
3711
3712 spin_lock_irqsave(&cm_id_priv->lock, flags);
3713 ret = cm_send_sidr_rep_locked(cm_id_priv, param);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3715 return ret;
3716}
3717EXPORT_SYMBOL(ib_send_cm_sidr_rep);
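
/*
 * Usage sketch (illustrative only): a listener typically answers an
 * IB_CM_SIDR_REQ_RECEIVED event from its cm_handler, e.g.
 *
 *	static int my_sidr_handler(struct ib_cm_id *cm_id,
 *				   const struct ib_cm_event *event)
 *	{
 *		struct ib_cm_sidr_rep_param rep = {
 *			.qp_num	= my_qp->qp_num,
 *			.qkey	= MY_QKEY,
 *			.status	= IB_SIDR_SUCCESS,
 *		};
 *
 *		if (event->event == IB_CM_SIDR_REQ_RECEIVED)
 *			return ib_send_cm_sidr_rep(cm_id, &rep);
 *		return 0;
 *	}
 *
 * "my_qp" and "MY_QKEY" are hypothetical; the reply must be sent while the
 * id is still in IB_CM_SIDR_REQ_RCVD, as enforced by
 * cm_send_sidr_rep_locked() above.
 */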
3718
3719static void cm_format_sidr_rep_event(struct cm_work *work,
3720 const struct cm_id_private *cm_id_priv)
3721{
3722 struct cm_sidr_rep_msg *sidr_rep_msg;
3723 struct ib_cm_sidr_rep_event_param *param;
3724
3725 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3726 work->mad_recv_wc->recv_buf.mad;
3727 param = &work->cm_event.param.sidr_rep_rcvd;
3728 param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
3729 param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
3730 param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
3731 param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
3732 sidr_rep_msg);
3733 param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
3734 sidr_rep_msg);
3735 param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3736 work->cm_event.private_data =
3737 IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
3738}
3739
3740static int cm_sidr_rep_handler(struct cm_work *work)
3741{
3742 struct cm_sidr_rep_msg *sidr_rep_msg;
3743 struct cm_id_private *cm_id_priv;
3744
3745 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3746 work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
3749 if (!cm_id_priv)
3750 return -EINVAL; /* Unmatched reply. */
3751
	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
3755 goto out;
3756 }
3757 cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);
3760
3761 cm_format_sidr_rep_event(work, cm_id_priv);
3762 cm_process_work(cm_id_priv, work);
3763 return 0;
3764out:
3765 cm_deref_id(cm_id_priv);
3766 return -EINVAL;
3767}
3768
3769static void cm_process_send_error(struct cm_id_private *cm_id_priv,
3770 struct ib_mad_send_buf *msg,
3771 enum ib_cm_state state,
3772 enum ib_wc_status wc_status)
3773{
3774 struct ib_cm_event cm_event = {};
3775 int ret;
3776
3777 /* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	if (msg != cm_id_priv->msg) {
		spin_unlock_irq(&cm_id_priv->lock);
3781 cm_free_msg(msg);
3782 return;
3783 }
3784 cm_free_priv_msg(msg);
3785
3786 if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS ||
3787 wc_status == IB_WC_WR_FLUSH_ERR)
3788 goto out_unlock;
3789
3790 trace_icm_mad_send_err(state, wc_status);
3791 switch (state) {
3792 case IB_CM_REQ_SENT:
3793 case IB_CM_MRA_REQ_RCVD:
3794 cm_reset_to_idle(cm_id_priv);
3795 cm_event.event = IB_CM_REQ_ERROR;
3796 break;
3797 case IB_CM_REP_SENT:
3798 case IB_CM_MRA_REP_RCVD:
3799 cm_reset_to_idle(cm_id_priv);
3800 cm_event.event = IB_CM_REP_ERROR;
3801 break;
3802 case IB_CM_DREQ_SENT:
3803 cm_enter_timewait(cm_id_priv);
3804 cm_event.event = IB_CM_DREQ_ERROR;
3805 break;
3806 case IB_CM_SIDR_REQ_SENT:
3807 cm_id_priv->id.state = IB_CM_IDLE;
3808 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3809 break;
3810 default:
3811 goto out_unlock;
3812 }
	spin_unlock_irq(&cm_id_priv->lock);
3814 cm_event.param.send_status = wc_status;
3815
3816 /* No other events can occur on the cm_id at this point. */
3817 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3818 if (ret)
3819 ib_destroy_cm_id(&cm_id_priv->id);
3820 return;
3821out_unlock:
	spin_unlock_irq(&cm_id_priv->lock);
3823}
3824
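/*
 * Send completion handler for all CM MADs. For consumer-initiated sends,
 * context[0] holds the owning cm_id_private and context[1] the id state at
 * send time; response MADs are sent with context[0] == NULL and are simply
 * freed here once the transmit counters have been updated.
 */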
3825static void cm_send_handler(struct ib_mad_agent *mad_agent,
3826 struct ib_mad_send_wc *mad_send_wc)
3827{
3828 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3829 struct cm_id_private *cm_id_priv = msg->context[0];
3830 enum ib_cm_state state =
3831 (enum ib_cm_state)(unsigned long)msg->context[1];
3832 struct cm_port *port;
3833 u16 attr_index;
3834
3835 port = mad_agent->context;
3836 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3837 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3838
3839 /*
3840 * If the send was in response to a received message (context[0] is not
3841 * set to a cm_id), and is not a REJ, then it is a send that was
3842 * manually retried.
3843 */
3844 if (!cm_id_priv && (attr_index != CM_REJ_COUNTER))
3845 msg->retries = 1;
3846
	atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counters[CM_XMIT_RETRIES][attr_index]);

	if (cm_id_priv)
		cm_process_send_error(cm_id_priv, msg, state,
				      mad_send_wc->status);
3855 else
3856 cm_free_response_msg(msg);
3857}
3858
3859static void cm_work_handler(struct work_struct *_work)
3860{
3861 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3862 int ret;
3863
3864 switch (work->cm_event.event) {
3865 case IB_CM_REQ_RECEIVED:
3866 ret = cm_req_handler(work);
3867 break;
3868 case IB_CM_MRA_RECEIVED:
3869 ret = cm_mra_handler(work);
3870 break;
3871 case IB_CM_REJ_RECEIVED:
3872 ret = cm_rej_handler(work);
3873 break;
3874 case IB_CM_REP_RECEIVED:
3875 ret = cm_rep_handler(work);
3876 break;
3877 case IB_CM_RTU_RECEIVED:
3878 ret = cm_rtu_handler(work);
3879 break;
3880 case IB_CM_USER_ESTABLISHED:
3881 ret = cm_establish_handler(work);
3882 break;
3883 case IB_CM_DREQ_RECEIVED:
3884 ret = cm_dreq_handler(work);
3885 break;
3886 case IB_CM_DREP_RECEIVED:
3887 ret = cm_drep_handler(work);
3888 break;
3889 case IB_CM_SIDR_REQ_RECEIVED:
3890 ret = cm_sidr_req_handler(work);
3891 break;
3892 case IB_CM_SIDR_REP_RECEIVED:
3893 ret = cm_sidr_rep_handler(work);
3894 break;
3895 case IB_CM_LAP_RECEIVED:
3896 ret = cm_lap_handler(work);
3897 break;
3898 case IB_CM_APR_RECEIVED:
3899 ret = cm_apr_handler(work);
3900 break;
3901 case IB_CM_TIMEWAIT_EXIT:
3902 ret = cm_timewait_handler(work);
3903 break;
3904 default:
		trace_icm_handler_err(work->cm_event.event);
3906 ret = -EINVAL;
3907 break;
3908 }
3909 if (ret)
3910 cm_free_work(work);
3911}
3912
3913static int cm_establish(struct ib_cm_id *cm_id)
3914{
3915 struct cm_id_private *cm_id_priv;
3916 struct cm_work *work;
3917 unsigned long flags;
3918 int ret = 0;
3919 struct cm_device *cm_dev;
3920
	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
3926 if (!work)
3927 return -ENOMEM;
3928
3929 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3930 spin_lock_irqsave(&cm_id_priv->lock, flags);
3931 switch (cm_id->state) {
3932 case IB_CM_REP_SENT:
3933 case IB_CM_MRA_REP_RCVD:
3934 cm_id->state = IB_CM_ESTABLISHED;
3935 break;
3936 case IB_CM_ESTABLISHED:
3937 ret = -EISCONN;
3938 break;
3939 default:
3940 trace_icm_establish_err(cm_id);
3941 ret = -EINVAL;
3942 break;
3943 }
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
3948 goto out;
3949 }
3950
3951 /*
3952 * The CM worker thread may try to destroy the cm_id before it
3953 * can execute this work item. To prevent potential deadlock,
3954 * we need to find the cm_id once we're in the context of the
3955 * worker thread, rather than holding a reference on it.
3956 */
3957 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3958 work->local_id = cm_id->local_id;
3959 work->remote_id = cm_id->remote_id;
3960 work->mad_recv_wc = NULL;
3961 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3962
3963 /* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
3972
3973out:
3974 return ret;
3975}
3976
3977static int cm_migrate(struct ib_cm_id *cm_id)
3978{
3979 struct cm_id_private *cm_id_priv;
3980 unsigned long flags;
3981 int ret = 0;
3982
3983 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3984 spin_lock_irqsave(&cm_id_priv->lock, flags);
3985 if (cm_id->state == IB_CM_ESTABLISHED &&
3986 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3987 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3988 cm_id->lap_state = IB_CM_LAP_IDLE;
3989 cm_id_priv->av = cm_id_priv->alt_av;
3990 } else
3991 ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3993
3994 return ret;
3995}
3996
3997int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3998{
3999 int ret;
4000
4001 switch (event) {
4002 case IB_EVENT_COMM_EST:
4003 ret = cm_establish(cm_id);
4004 break;
4005 case IB_EVENT_PATH_MIG:
4006 ret = cm_migrate(cm_id);
4007 break;
4008 default:
4009 ret = -EINVAL;
4010 }
4011 return ret;
4012}
4013EXPORT_SYMBOL(ib_cm_notify);
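
/*
 * Usage sketch (illustrative only): drivers and ULPs typically forward QP
 * async events to the CM, e.g. from an ib_event handler:
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		struct ib_cm_id *cm_id = ctx;
 *
 *		if (event->event == IB_EVENT_COMM_EST ||
 *		    event->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(cm_id, event->event);
 *	}
 *
 * "my_qp_event_handler" is hypothetical; IB_EVENT_COMM_EST marks the
 * connection established before the RTU arrives, while IB_EVENT_PATH_MIG
 * switches the primary address handle over to the loaded alternate path.
 */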
4014
4015static void cm_recv_handler(struct ib_mad_agent *mad_agent,
4016 struct ib_mad_send_buf *send_buf,
4017 struct ib_mad_recv_wc *mad_recv_wc)
4018{
4019 struct cm_port *port = mad_agent->context;
4020 struct cm_work *work;
4021 enum ib_cm_event_type event;
4022 bool alt_path = false;
4023 u16 attr_id;
4024 int paths = 0;
4025 int going_down = 0;
4026
4027 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
4028 case CM_REQ_ATTR_ID:
		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
						mad_recv_wc->recv_buf.mad);
		paths = 1 + (alt_path != 0);
4032 event = IB_CM_REQ_RECEIVED;
4033 break;
4034 case CM_MRA_ATTR_ID:
4035 event = IB_CM_MRA_RECEIVED;
4036 break;
4037 case CM_REJ_ATTR_ID:
4038 event = IB_CM_REJ_RECEIVED;
4039 break;
4040 case CM_REP_ATTR_ID:
4041 event = IB_CM_REP_RECEIVED;
4042 break;
4043 case CM_RTU_ATTR_ID:
4044 event = IB_CM_RTU_RECEIVED;
4045 break;
4046 case CM_DREQ_ATTR_ID:
4047 event = IB_CM_DREQ_RECEIVED;
4048 break;
4049 case CM_DREP_ATTR_ID:
4050 event = IB_CM_DREP_RECEIVED;
4051 break;
4052 case CM_SIDR_REQ_ATTR_ID:
4053 event = IB_CM_SIDR_REQ_RECEIVED;
4054 break;
4055 case CM_SIDR_REP_ATTR_ID:
4056 event = IB_CM_SIDR_REP_RECEIVED;
4057 break;
4058 case CM_LAP_ATTR_ID:
4059 paths = 1;
4060 event = IB_CM_LAP_RECEIVED;
4061 break;
4062 case CM_APR_ATTR_ID:
4063 event = IB_CM_APR_RECEIVED;
4064 break;
4065 default:
4066 ib_free_recv_mad(mad_recv_wc);
4067 return;
4068 }
4069
4070 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);
4072
4073 work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
4074 if (!work) {
4075 ib_free_recv_mad(mad_recv_wc);
4076 return;
4077 }
4078
4079 INIT_DELAYED_WORK(&work->work, cm_work_handler);
4080 work->cm_event.event = event;
4081 work->mad_recv_wc = mad_recv_wc;
4082 work->port = port;
4083
4084 /* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);
4091
4092 if (going_down) {
		kfree(work);
4094 ib_free_recv_mad(mad_recv_wc);
4095 }
4096}
4097
4098static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
4099 struct ib_qp_attr *qp_attr,
4100 int *qp_attr_mask)
4101{
4102 unsigned long flags;
4103 int ret;
4104
4105 spin_lock_irqsave(&cm_id_priv->lock, flags);
4106 switch (cm_id_priv->id.state) {
4107 case IB_CM_REQ_SENT:
4108 case IB_CM_MRA_REQ_RCVD:
4109 case IB_CM_REQ_RCVD:
4110 case IB_CM_MRA_REQ_SENT:
4111 case IB_CM_REP_RCVD:
4112 case IB_CM_MRA_REP_SENT:
4113 case IB_CM_REP_SENT:
4114 case IB_CM_MRA_REP_RCVD:
4115 case IB_CM_ESTABLISHED:
4116 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
4117 IB_QP_PKEY_INDEX | IB_QP_PORT;
4118 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
4119 if (cm_id_priv->responder_resources) {
4120 struct ib_device *ib_dev = cm_id_priv->id.device;
4121 u64 support_flush = ib_dev->attrs.device_cap_flags &
4122 (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT);
4123 u32 flushable = support_flush ?
4124 (IB_ACCESS_FLUSH_GLOBAL |
4125 IB_ACCESS_FLUSH_PERSISTENT) : 0;
4126
4127 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
4128 IB_ACCESS_REMOTE_ATOMIC |
4129 flushable;
4130 }
4131 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
4132 if (cm_id_priv->av.port)
4133 qp_attr->port_num = cm_id_priv->av.port->port_num;
4134 ret = 0;
4135 break;
4136 default:
		trace_icm_qp_init_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4142 return ret;
4143}
4144
4145static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
4146 struct ib_qp_attr *qp_attr,
4147 int *qp_attr_mask)
4148{
4149 unsigned long flags;
4150 int ret;
4151
4152 spin_lock_irqsave(&cm_id_priv->lock, flags);
4153 switch (cm_id_priv->id.state) {
4154 case IB_CM_REQ_RCVD:
4155 case IB_CM_MRA_REQ_SENT:
4156 case IB_CM_REP_RCVD:
4157 case IB_CM_MRA_REP_SENT:
4158 case IB_CM_REP_SENT:
4159 case IB_CM_MRA_REP_RCVD:
4160 case IB_CM_ESTABLISHED:
4161 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
4162 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
4163 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
4164 if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
4165 cm_id_priv->av.dlid_datapath &&
4166 (cm_id_priv->av.dlid_datapath != 0xffff))
4167 qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
4168 qp_attr->path_mtu = cm_id_priv->path_mtu;
4169 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
4170 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
4171 if (cm_id_priv->qp_type == IB_QPT_RC ||
4172 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
4173 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
4174 IB_QP_MIN_RNR_TIMER;
4175 qp_attr->max_dest_rd_atomic =
4176 cm_id_priv->responder_resources;
4177 qp_attr->min_rnr_timer = 0;
4178 }
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) &&
4180 cm_id_priv->alt_av.port) {
4181 *qp_attr_mask |= IB_QP_ALT_PATH;
4182 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4183 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4184 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4185 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4186 }
4187 ret = 0;
4188 break;
4189 default:
		trace_icm_qp_rtr_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4195 return ret;
4196}
4197
4198static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
4199 struct ib_qp_attr *qp_attr,
4200 int *qp_attr_mask)
4201{
4202 unsigned long flags;
4203 int ret;
4204
4205 spin_lock_irqsave(&cm_id_priv->lock, flags);
4206 switch (cm_id_priv->id.state) {
4207 /* Allow transition to RTS before sending REP */
4208 case IB_CM_REQ_RCVD:
4209 case IB_CM_MRA_REQ_SENT:
4210
4211 case IB_CM_REP_RCVD:
4212 case IB_CM_MRA_REP_SENT:
4213 case IB_CM_REP_SENT:
4214 case IB_CM_MRA_REP_RCVD:
4215 case IB_CM_ESTABLISHED:
4216 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
4217 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
4218 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
4219 switch (cm_id_priv->qp_type) {
4220 case IB_QPT_RC:
4221 case IB_QPT_XRC_INI:
4222 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4223 IB_QP_MAX_QP_RD_ATOMIC;
4224 qp_attr->retry_cnt = cm_id_priv->retry_count;
4225 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
4226 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4227 fallthrough;
4228 case IB_QPT_XRC_TGT:
4229 *qp_attr_mask |= IB_QP_TIMEOUT;
4230 qp_attr->timeout = cm_id_priv->av.timeout;
4231 break;
4232 default:
4233 break;
4234 }
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4236 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
4237 qp_attr->path_mig_state = IB_MIG_REARM;
4238 }
4239 } else {
4240 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
4241 if (cm_id_priv->alt_av.port)
4242 qp_attr->alt_port_num =
4243 cm_id_priv->alt_av.port->port_num;
4244 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4245 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4246 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4247 qp_attr->path_mig_state = IB_MIG_REARM;
4248 }
4249 ret = 0;
4250 break;
4251 default:
		trace_icm_qp_rts_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4257 return ret;
4258}
4259
4260int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
4261 struct ib_qp_attr *qp_attr,
4262 int *qp_attr_mask)
4263{
4264 struct cm_id_private *cm_id_priv;
4265 int ret;
4266
4267 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
4268 switch (qp_attr->qp_state) {
4269 case IB_QPS_INIT:
4270 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
4271 break;
4272 case IB_QPS_RTR:
4273 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
4274 break;
4275 case IB_QPS_RTS:
4276 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
4277 break;
4278 default:
4279 ret = -EINVAL;
4280 break;
4281 }
4282 return ret;
4283}
4284EXPORT_SYMBOL(ib_cm_init_qp_attr);
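
/*
 * Usage sketch (illustrative only): consumers drive the connected QP through
 * INIT -> RTR -> RTS by asking the CM for the attributes at each step,
 * roughly:
 *
 *	struct ib_qp_attr attr;
 *	int mask;
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	if (!ib_cm_init_qp_attr(cm_id, &attr, &mask))
 *		ret = ib_modify_qp(qp, &attr, mask);
 *
 * and then the same pattern with IB_QPS_RTR and IB_QPS_RTS. "qp" is
 * hypothetical and error handling is omitted for brevity.
 */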
4285
4286static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num,
4287 struct ib_port_attribute *attr, char *buf)
4288{
4289 struct cm_counter_attribute *cm_attr =
4290 container_of(attr, struct cm_counter_attribute, attr);
	struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client);

	if (WARN_ON(!cm_dev))
		return -EINVAL;

	return sysfs_emit(
		buf, "%ld\n",
		atomic_long_read(
			&cm_dev->port[port_num - 1]
				 ->counters[cm_attr->group][cm_attr->index]));
4301}
4302
4303#define CM_COUNTER_ATTR(_name, _group, _index) \
4304 { \
4305 .attr = __ATTR(_name, 0444, cm_show_counter, NULL), \
4306 .group = _group, .index = _index \
4307 }
4308
4309#define CM_COUNTER_GROUP(_group, _name) \
4310 static struct cm_counter_attribute cm_counter_attr_##_group[] = { \
4311 CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER), \
4312 CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER), \
4313 CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER), \
4314 CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER), \
4315 CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER), \
4316 CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER), \
4317 CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER), \
4318 CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER), \
4319 CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER), \
4320 CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER), \
4321 CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER), \
4322 }; \
4323 static struct attribute *cm_counter_attrs_##_group[] = { \
4324 &cm_counter_attr_##_group[0].attr.attr, \
4325 &cm_counter_attr_##_group[1].attr.attr, \
4326 &cm_counter_attr_##_group[2].attr.attr, \
4327 &cm_counter_attr_##_group[3].attr.attr, \
4328 &cm_counter_attr_##_group[4].attr.attr, \
4329 &cm_counter_attr_##_group[5].attr.attr, \
4330 &cm_counter_attr_##_group[6].attr.attr, \
4331 &cm_counter_attr_##_group[7].attr.attr, \
4332 &cm_counter_attr_##_group[8].attr.attr, \
4333 &cm_counter_attr_##_group[9].attr.attr, \
4334 &cm_counter_attr_##_group[10].attr.attr, \
4335 NULL, \
4336 }; \
4337 static const struct attribute_group cm_counter_group_##_group = { \
4338 .name = _name, \
4339 .attrs = cm_counter_attrs_##_group, \
4340 };
4341
4342CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs")
4343CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries")
4344CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs")
4345CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates")
4346
4347static const struct attribute_group *cm_counter_groups[] = {
4348 &cm_counter_group_CM_XMIT,
4349 &cm_counter_group_CM_XMIT_RETRIES,
4350 &cm_counter_group_CM_RECV,
4351 &cm_counter_group_CM_RECV_DUPLICATES,
4352 NULL,
4353};
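
/*
 * These groups are registered per port in cm_add_one() below; with the usual
 * sysfs layout the counters then appear under the port directory, e.g.
 * /sys/class/infiniband/<device>/ports/<port>/cm_rx_msgs/req (path shown for
 * illustration only).
 */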
4354
4355static int cm_add_one(struct ib_device *ib_device)
4356{
4357 struct cm_device *cm_dev;
4358 struct cm_port *port;
4359 struct ib_mad_reg_req reg_req = {
4360 .mgmt_class = IB_MGMT_CLASS_CM,
4361 .mgmt_class_version = IB_CM_CLASS_VERSION,
4362 };
4363 struct ib_port_modify port_modify = {
4364 .set_port_cap_mask = IB_PORT_CM_SUP
4365 };
4366 unsigned long flags;
4367 int ret;
4368 int count = 0;
4369 u32 i;
4370
4371 cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
4372 GFP_KERNEL);
4373 if (!cm_dev)
4374 return -ENOMEM;
4375
	kref_init(&cm_dev->kref);
	spin_lock_init(&cm_dev->mad_agent_lock);
	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
4390 if (!port) {
4391 ret = -ENOMEM;
4392 goto error1;
4393 }
4394
4395 cm_dev->port[i-1] = port;
4396 port->cm_dev = cm_dev;
4397 port->port_num = i;
4398
		ret = ib_port_register_client_groups(ib_device, i,
						     cm_counter_groups);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			goto error2;
		}

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
4418 if (ret)
4419 goto error3;
4420
4421 count++;
4422 }
4423
4424 if (!count) {
4425 ret = -EOPNOTSUPP;
4426 goto free;
4427 }
4428
4429 write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
4431 write_unlock_irqrestore(&cm.device_lock, flags);
4432 return 0;
4433
error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	ib_port_unregister_client_groups(ib_device, i, cm_counter_groups);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		ib_port_unregister_client_groups(ib_device, i,
						 cm_counter_groups);
4450 }
4451free:
4452 cm_device_put(cm_dev);
4453 return ret;
4454}
4455
4456static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4457{
4458 struct cm_device *cm_dev = client_data;
4459 struct cm_port *port;
4460 struct ib_port_modify port_modify = {
4461 .clr_port_cap_mask = IB_PORT_CM_SUP
4462 };
4463 unsigned long flags;
4464 u32 i;
4465
	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	rdma_for_each_port (ib_device, i) {
		struct ib_mad_agent *mad_agent;

		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		mad_agent = port->mad_agent;
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/*
		 * Flush the workqueue only after going_down has been set;
		 * this ensures the receive handler can no longer queue new
		 * work, after which it is safe to unregister the MAD agent.
		 */
		flush_workqueue(cm.wq);
		/*
		 * The above ensures no call paths from the work are running,
		 * the remaining paths all take the mad_agent_lock.
		 */
		spin_lock(&cm_dev->mad_agent_lock);
		port->mad_agent = NULL;
		spin_unlock(&cm_dev->mad_agent_lock);
		ib_unregister_mad_agent(mad_agent);
		ib_port_unregister_client_groups(ib_device, i,
						 cm_counter_groups);
4499 }
4500
4501 cm_device_put(cm_dev);
4502}
4503
4504static int __init ib_cm_init(void)
4505{
4506 int ret;
4507
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
4521 if (!cm.wq) {
4522 ret = -ENOMEM;
4523 goto error2;
4524 }
4525
	ret = ib_register_client(&cm_client);
4527 if (ret)
4528 goto error3;
4529
4530 return 0;
4531error3:
	destroy_workqueue(cm.wq);
4533error2:
4534 return ret;
4535}
4536
4537static void __exit ib_cm_cleanup(void)
4538{
4539 struct cm_timewait_info *timewait_info, *tmp;
4540
	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
4552 }
4553
4554 WARN_ON(!xa_empty(&cm.local_id_table));
4555}
4556
4557module_init(ib_cm_init);
4558module_exit(ib_cm_cleanup);
4559
