/*
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const iwcm_rej_reason_strs[] = {
	[ECONNRESET] = "reset by remote host",
	[ECONNREFUSED] = "refused by remote application",
	[ETIMEDOUT] = "setup timeout",
};

const char *__attribute_const__ iwcm_reject_msg(int reason)
{
	size_t index;

	/* iWARP uses negative errnos */
	index = -reason;

	if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
	    iwcm_rej_reason_strs[index])
		return iwcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(iwcm_reject_msg);

static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb},
	[RDMA_NL_IWPM_HELLO] = {.dump = iwpm_hello_cb}
};

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct work;
	struct iwcm_id_private *cm_id;
	struct list_head list;
	struct iw_cm_event event;
	struct list_head free_list;
};

static unsigned int default_backlog = 256;

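/* Exposed to userspace as /proc/sys/net/iw_cm/default_backlog (see iw_cm_init()). */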
static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
	{
		.procname = "default_backlog",
		.data = &default_backlog,
		.maxlen = sizeof(default_backlog),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed. cm_event_handler() returns -ENOMEM in this case. It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method. This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
		list_del(e);
		kfree(list_entry(e, struct iwcm_work, free_list));
	}
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, free the cm_id and return 1.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
		return 1;
	}

	return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	refcount_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	(void)iwcm_deref_id(cm_id_priv);
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	refcount_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
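
/*
 * Minimal usage sketch for a consumer of this interface (illustrative
 * only: my_ib_dev, my_handler, my_ctx and listen_addr are hypothetical
 * names, not part of this file):
 *
 *	cm_id = iw_create_cm_id(my_ib_dev, my_handler, my_ctx);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 *	cm_id->local_addr = listen_addr;	(filled in by the caller)
 *	ret = iw_cm_listen(cm_id, 0);		(0 selects default_backlog)
 *	if (ret)
 *		iw_destroy_cm_id(cm_id);
 *
 * The handler runs on the iwcm_wq work thread for each IW_CM_EVENT_*
 * posted against the cm_id.
 */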

static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}
/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for a user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * the connect_request event was delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if the wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	/*
	 * Since we're deleting the cm_id, drop any events that
	 * might arrive before the last dereference.
	 */
	set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;

	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		cm_id->device->ops.iw_destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving the connection request event notification, or
		 * returned non-zero from the event callback function.
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->ops.iw_reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);

	if (cm_id->mapped) {
		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
	}

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/**
 * iw_cm_check_wildcard - If IP address is 0 then use original
 * @pm_addr: sockaddr containing the IP to check for wildcard
 * @cm_addr: sockaddr containing the actual IP address
 * @cm_outaddr: sockaddr whose IP address is updated; its port is left alone
 *
 * Checks pm_addr for the wildcard and, if found, sets cm_outaddr's
 * IP to the actual one (cm_addr).
 */
static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
				 struct sockaddr_storage *cm_addr,
				 struct sockaddr_storage *cm_outaddr)
{
	if (pm_addr->ss_family == AF_INET) {
		struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;

		if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
			struct sockaddr_in *cm4_addr =
				(struct sockaddr_in *)cm_addr;
			struct sockaddr_in *cm4_outaddr =
				(struct sockaddr_in *)cm_outaddr;

			cm4_outaddr->sin_addr = cm4_addr->sin_addr;
		}
	} else {
		struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;

		if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
			struct sockaddr_in6 *cm6_addr =
				(struct sockaddr_in6 *)cm_addr;
			struct sockaddr_in6 *cm6_outaddr =
				(struct sockaddr_in6 *)cm_outaddr;

			cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
		}
	}
}

/**
 * iw_cm_map - Use the portmapper to map the ports
 * @cm_id: connection manager pointer
 * @active: Indicates the active side when true
 *
 * Tries to add a mapping for a port using the portmapper. If
 * successful in mapping the IP/port it will check the remote
 * mapped IP address for a wildcard IP address and replace the
 * zero IP address with the remote_addr.
 *
 * Return: non-zero for error only if iwpm_create_mapinfo() fails.
 */
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
	const char *devname = dev_name(&cm_id->device->dev);
	const char *ifname = cm_id->device->iw_ifname;
	struct iwpm_dev_data pm_reg_msg = {};
	struct iwpm_sa_data pm_msg;
	int status;

	if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
	    strlen(ifname) >= sizeof(pm_reg_msg.if_name))
		return -EINVAL;

	cm_id->m_local_addr = cm_id->local_addr;
	cm_id->m_remote_addr = cm_id->remote_addr;

	strcpy(pm_reg_msg.dev_name, devname);
	strcpy(pm_reg_msg.if_name, ifname);

	if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
	    !iwpm_valid_pid())
		return 0;

	cm_id->mapped = true;
	pm_msg.loc_addr = cm_id->local_addr;
	pm_msg.rem_addr = cm_id->remote_addr;
	pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ?
		       IWPM_FLAGS_NO_PORT_MAP : 0;
	if (active)
		status = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_IWCM);
	else
		status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);

	if (!status) {
		cm_id->m_local_addr = pm_msg.mapped_loc_addr;
		if (active) {
			cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
			iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
					     &cm_id->remote_addr,
					     &cm_id->m_remote_addr);
		}
	}

	return iwpm_create_mapinfo(&cm_id->local_addr,
				   &cm_id->m_local_addr,
				   RDMA_NL_IWCM, pm_msg.flags);
}

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	if (!backlog)
		backlog = default_backlog;

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = iw_cm_map(cm_id, false);
		if (!ret)
			ret = cm_id->device->ops.iw_create_listen(cm_id,
								  backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->ops.iw_reject(cm_id, private_data,
					   private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->ops.iw_add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->ops.iw_accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		qp = cm_id_priv->qp;
		cm_id_priv->qp = NULL;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		if (qp)
			cm_id->device->ops.iw_rem_ref(qp);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
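
/*
 * Sketch of the passive-side accept path as seen from a client's event
 * handler (illustrative only; my_qp and the ord/ird values below are
 * hypothetical):
 *
 *	case IW_CM_EVENT_CONNECT_REQUEST: {
 *		struct iw_cm_conn_param param = {
 *			.ord = 16,
 *			.ird = 16,
 *			.qpn = my_qp->qp_num,
 *		};
 *
 *		return iw_cm_accept(cm_id, &param);
 *	}
 *
 * If the handler returns non-zero, cm_conn_req_handler() rejects and
 * destroys the new cm_id on the client's behalf.
 */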

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		ret = -EINVAL;
		goto err;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		ret = -EINVAL;
		goto err;
	}
	cm_id->device->ops.iw_add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = iw_cm_map(cm_id, true);
	if (!ret)
		ret = cm_id->device->ops.iw_connect(cm_id, iw_param);
	if (!ret)
		return 0;	/* success */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;
	cm_id_priv->state = IW_CM_STATE_IDLE;
err:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id->device->ops.iw_rem_ref(qp);
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);
	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->m_local_addr = iw_event->local_addr;
	cm_id->m_remote_addr = iw_event->remote_addr;
	cm_id->local_addr = listen_id_priv->id.local_addr;

	ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
				   &iw_event->remote_addr,
				   &cm_id->remote_addr,
				   RDMA_NL_IWCM);
	if (ret) {
		cm_id->remote_addr = iw_event->remote_addr;
	} else {
		iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
				     &iw_event->local_addr,
				     &cm_id->local_addr);
		iw_event->local_addr = cm_id->local_addr;
		iw_event->remote_addr = cm_id->remote_addr;
	}

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	struct ib_qp *qp = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == 0) {
		cm_id_priv->id.m_local_addr = iw_event->local_addr;
		cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
		iw_event->local_addr = cm_id_priv->id.local_addr;
		iw_event->remote_addr = cm_id_priv->id.remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		qp = cm_id_priv->qp;
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	struct ib_qp *qp;
	unsigned long flags;
	int ret = 0, notify_event = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;

	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		notify_event = 1;
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);
	if (notify_event)
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	return ret;
}

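/*
 * Dispatch a dequeued event to the handler for its type. Only the
 * CONNECT_REPLY, ESTABLISHED and CLOSE handlers can return non-zero;
 * cm_work_handler() destroys the cm_id when they do.
 */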
static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
			ret = process_event(cm_id_priv, &levent);
			if (ret)
				destroy_cm_id(&cm_id_priv->id);
		} else
			pr_debug("dropping event %d\n", levent.event);
		if (iwcm_deref_id(cm_id_priv))
			return;
		if (empty)
			return;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called on interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	refcount_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

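/*
 * Fill in the attributes needed to move an iWARP QP to INIT or RTR:
 * remote read and remote write access are enabled in any state where
 * connection setup is still possible.
 */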
static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

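/**
 * iw_cm_init_qp_attr - initialize QP attributes for a state transition
 * @cm_id: connection identifier associated with the QP
 * @qp_attr: attribute structure with qp_state set to the target state
 * @qp_attr_mask: filled in with the attributes the caller must apply
 *
 * Return: 0 on success, or -EINVAL if the target state is not supported
 * or the connection is in the wrong state for the transition.
 */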
int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

static int __init iw_cm_init(void)
{
	int ret;

	ret = iwpm_init(RDMA_NL_IWCM);
	if (ret)
		return ret;

	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
	if (!iwcm_wq)
		goto err_alloc;

	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
						 iwcm_ctl_table);
	if (!iwcm_ctl_table_hdr) {
		pr_err("iw_cm: couldn't register sysctl paths\n");
		goto err_sysctl;
	}

	rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
	return 0;

err_sysctl:
	destroy_workqueue(iwcm_wq);
err_alloc:
	iwpm_exit(RDMA_NL_IWCM);
	return -ENOMEM;
}

static void __exit iw_cm_cleanup(void)
{
	rdma_nl_unregister(RDMA_NL_IWCM);
	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
	destroy_workqueue(iwcm_wq);
	iwpm_exit(RDMA_NL_IWCM);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);