1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause |
2 | /* |
3 | * Copyright(c) 2016 - 2020 Intel Corporation. |
4 | */ |
5 | |
6 | #include <linux/hash.h> |
7 | #include <linux/bitops.h> |
8 | #include <linux/lockdep.h> |
9 | #include <linux/vmalloc.h> |
10 | #include <linux/slab.h> |
11 | #include <rdma/ib_verbs.h> |
12 | #include <rdma/ib_hdrs.h> |
13 | #include <rdma/opa_addr.h> |
14 | #include <rdma/uverbs_ioctl.h> |
15 | #include "qp.h" |
16 | #include "vt.h" |
17 | #include "trace.h" |
18 | |
19 | #define RVT_RWQ_COUNT_THRESHOLD 16 |
20 | |
21 | static void rvt_rc_timeout(struct timer_list *t); |
22 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
23 | enum ib_qp_type type); |
24 | |
25 | /* |
26 | * Convert the AETH RNR timeout code into the number of microseconds. |
27 | */ |
28 | static const u32 ib_rvt_rnr_table[32] = { |
29 | 655360, /* 00: 655.36 */ |
30 | 10, /* 01: .01 */ |
	20,	/* 02: .02 */
32 | 30, /* 03: .03 */ |
33 | 40, /* 04: .04 */ |
34 | 60, /* 05: .06 */ |
35 | 80, /* 06: .08 */ |
36 | 120, /* 07: .12 */ |
37 | 160, /* 08: .16 */ |
38 | 240, /* 09: .24 */ |
39 | 320, /* 0A: .32 */ |
40 | 480, /* 0B: .48 */ |
41 | 640, /* 0C: .64 */ |
42 | 960, /* 0D: .96 */ |
43 | 1280, /* 0E: 1.28 */ |
44 | 1920, /* 0F: 1.92 */ |
45 | 2560, /* 10: 2.56 */ |
46 | 3840, /* 11: 3.84 */ |
47 | 5120, /* 12: 5.12 */ |
48 | 7680, /* 13: 7.68 */ |
49 | 10240, /* 14: 10.24 */ |
50 | 15360, /* 15: 15.36 */ |
51 | 20480, /* 16: 20.48 */ |
52 | 30720, /* 17: 30.72 */ |
53 | 40960, /* 18: 40.96 */ |
54 | 61440, /* 19: 61.44 */ |
55 | 81920, /* 1A: 81.92 */ |
56 | 122880, /* 1B: 122.88 */ |
57 | 163840, /* 1C: 163.84 */ |
58 | 245760, /* 1D: 245.76 */ |
59 | 327680, /* 1E: 327.68 */ |
60 | 491520 /* 1F: 491.52 */ |
61 | }; |
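/*
 * Usage sketch (assumes the helper in rdmavt_qp.h): callers normally index
 * this table with the 5-bit RNR timeout field extracted from the AETH, e.g.
 *
 *	usec = ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
 *				IB_AETH_CREDIT_MASK];
 *
 * See rvt_aeth_to_usec() for the authoritative extraction.
 */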
62 | |
63 | /* |
64 | * Note that it is OK to post send work requests in the SQE and ERR |
65 | * states; rvt_do_send() will process them and generate error |
66 | * completions as per IB 1.2 C10-96. |
67 | */ |
68 | const int ib_rvt_state_ops[IB_QPS_ERR + 1] = { |
69 | [IB_QPS_RESET] = 0, |
70 | [IB_QPS_INIT] = RVT_POST_RECV_OK, |
71 | [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK, |
72 | [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK | |
73 | RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK | |
74 | RVT_PROCESS_NEXT_SEND_OK, |
75 | [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK | |
76 | RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK, |
77 | [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK | |
78 | RVT_POST_SEND_OK | RVT_FLUSH_SEND, |
79 | [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV | |
80 | RVT_POST_SEND_OK | RVT_FLUSH_SEND, |
81 | }; |
82 | EXPORT_SYMBOL(ib_rvt_state_ops); |
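/*
 * Usage sketch: the post and progress paths gate themselves on this table.
 * A post-send path, for example, typically does
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 *
 * (rvt_post_send() later in this file performs the authoritative check
 * under qp->s_hlock).
 */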
83 | |
84 | /* platform specific: return the last level cache (llc) size, in KiB */ |
85 | static int rvt_wss_llc_size(void) |
86 | { |
87 | /* assume that the boot CPU value is universal for all CPUs */ |
88 | return boot_cpu_data.x86_cache_size; |
89 | } |
90 | |
91 | /* platform specific: cacheless copy */ |
92 | static void cacheless_memcpy(void *dst, void *src, size_t n) |
93 | { |
94 | /* |
95 | * Use the only available X64 cacheless copy. Add a __user cast |
 * to quiet sparse. The src argument is already in the kernel so
 * there are no security issues. The extra fault recovery machinery
 * is not invoked.
 */
	__copy_user_nocache(dst, (void __user *)src, n);
101 | } |
102 | |
103 | void rvt_wss_exit(struct rvt_dev_info *rdi) |
104 | { |
105 | struct rvt_wss *wss = rdi->wss; |
106 | |
107 | if (!wss) |
108 | return; |
109 | |
110 | /* coded to handle partially initialized and repeat callers */ |
	kfree(wss->entries);
	wss->entries = NULL;
	kfree(rdi->wss);
114 | rdi->wss = NULL; |
115 | } |
116 | |
117 | /* |
118 | * rvt_wss_init - Init wss data structures |
119 | * |
120 | * Return: 0 on success |
121 | */ |
122 | int rvt_wss_init(struct rvt_dev_info *rdi) |
123 | { |
124 | unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode; |
125 | unsigned int wss_threshold = rdi->dparms.wss_threshold; |
126 | unsigned int wss_clean_period = rdi->dparms.wss_clean_period; |
127 | long llc_size; |
128 | long llc_bits; |
129 | long table_size; |
130 | long table_bits; |
131 | struct rvt_wss *wss; |
132 | int node = rdi->dparms.node; |
133 | |
134 | if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) { |
135 | rdi->wss = NULL; |
136 | return 0; |
137 | } |
138 | |
	rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
140 | if (!rdi->wss) |
141 | return -ENOMEM; |
142 | wss = rdi->wss; |
143 | |
144 | /* check for a valid percent range - default to 80 if none or invalid */ |
145 | if (wss_threshold < 1 || wss_threshold > 100) |
146 | wss_threshold = 80; |
147 | |
148 | /* reject a wildly large period */ |
149 | if (wss_clean_period > 1000000) |
150 | wss_clean_period = 256; |
151 | |
152 | /* reject a zero period */ |
153 | if (wss_clean_period == 0) |
154 | wss_clean_period = 1; |
155 | |
156 | /* |
157 | * Calculate the table size - the next power of 2 larger than the |
158 | * LLC size. LLC size is in KiB. |
159 | */ |
160 | llc_size = rvt_wss_llc_size() * 1024; |
161 | table_size = roundup_pow_of_two(llc_size); |
162 | |
163 | /* one bit per page in rounded up table */ |
164 | llc_bits = llc_size / PAGE_SIZE; |
165 | table_bits = table_size / PAGE_SIZE; |
166 | wss->pages_mask = table_bits - 1; |
167 | wss->num_entries = table_bits / BITS_PER_LONG; |
168 | |
169 | wss->threshold = (llc_bits * wss_threshold) / 100; |
170 | if (wss->threshold == 0) |
171 | wss->threshold = 1; |
172 | |
173 | wss->clean_period = wss_clean_period; |
	atomic_set(&wss->clean_counter, wss_clean_period);

	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
				    GFP_KERNEL, node);
178 | if (!wss->entries) { |
179 | rvt_wss_exit(rdi); |
180 | return -ENOMEM; |
181 | } |
182 | |
183 | return 0; |
184 | } |
185 | |
186 | /* |
187 | * Advance the clean counter. When the clean period has expired, |
188 | * clean an entry. |
189 | * |
190 | * This is implemented in atomics to avoid locking. Because multiple |
191 | * variables are involved, it can be racy which can lead to slightly |
192 | * inaccurate information. Since this is only a heuristic, this is |
 * OK. Any inaccuracies will clean themselves out as the counter
194 | * advances. That said, it is unlikely the entry clean operation will |
195 | * race - the next possible racer will not start until the next clean |
196 | * period. |
197 | * |
198 | * The clean counter is implemented as a decrement to zero. When zero |
199 | * is reached an entry is cleaned. |
200 | */ |
201 | static void wss_advance_clean_counter(struct rvt_wss *wss) |
202 | { |
203 | int entry; |
204 | int weight; |
205 | unsigned long bits; |
206 | |
207 | /* become the cleaner if we decrement the counter to zero */ |
	if (atomic_dec_and_test(&wss->clean_counter)) {
209 | /* |
210 | * Set, not add, the clean period. This avoids an issue |
211 | * where the counter could decrement below the clean period. |
212 | * Doing a set can result in lost decrements, slowing the |
		 * clean advance. Since this is a heuristic, this possible
214 | * slowdown is OK. |
215 | * |
216 | * An alternative is to loop, advancing the counter by a |
217 | * clean period until the result is > 0. However, this could |
218 | * lead to several threads keeping another in the clean loop. |
219 | * This could be mitigated by limiting the number of times |
220 | * we stay in the loop. |
221 | */ |
		atomic_set(&wss->clean_counter, wss->clean_period);
223 | |
224 | /* |
225 | * Uniquely grab the entry to clean and move to next. |
226 | * The current entry is always the lower bits of |
227 | * wss.clean_entry. The table size, wss.num_entries, |
228 | * is always a power-of-2. |
229 | */ |
		entry = (atomic_inc_return(&wss->clean_entry) - 1)
231 | & (wss->num_entries - 1); |
232 | |
233 | /* clear the entry and count the bits */ |
234 | bits = xchg(&wss->entries[entry], 0); |
235 | weight = hweight64((u64)bits); |
236 | /* only adjust the contended total count if needed */ |
237 | if (weight) |
			atomic_sub(weight, &wss->total_count);
239 | } |
240 | } |
241 | |
242 | /* |
243 | * Insert the given address into the working set array. |
244 | */ |
245 | static void wss_insert(struct rvt_wss *wss, void *address) |
246 | { |
247 | u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask; |
248 | u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */ |
249 | u32 nr = page & (BITS_PER_LONG - 1); |
250 | |
	if (!test_and_set_bit(nr, &wss->entries[entry]))
		atomic_inc(&wss->total_count);
253 | |
254 | wss_advance_clean_counter(wss); |
255 | } |
256 | |
257 | /* |
258 | * Is the working set larger than the threshold? |
259 | */ |
260 | static inline bool wss_exceeds_threshold(struct rvt_wss *wss) |
261 | { |
	return atomic_read(&wss->total_count) >= wss->threshold;
263 | } |
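/*
 * Usage sketch (roughly what rvt_copy_sge(), later in this file, does when
 * dparms.sge_copy_mode is RVT_SGE_COPY_ADAPTIVE):
 *
 *	wss_insert(wss, sge->vaddr);
 *	if (length >= (2 * PAGE_SIZE))
 *		wss_insert(wss, sge->vaddr + PAGE_SIZE);
 *	cacheless_copy = wss_exceeds_threshold(wss);
 *
 * Once the tracked working set covers more than the configured percentage
 * of the LLC, copies switch to cacheless_memcpy().
 */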
264 | |
265 | static void get_map_page(struct rvt_qpn_table *qpt, |
266 | struct rvt_qpn_map *map) |
267 | { |
268 | unsigned long page = get_zeroed_page(GFP_KERNEL); |
269 | |
270 | /* |
271 | * Free the page if someone raced with us installing it. |
272 | */ |
273 | |
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
280 | } |
281 | |
282 | /** |
283 | * init_qpn_table - initialize the QP number table for a device |
284 | * @rdi: rvt dev struct |
285 | * @qpt: the QPN table |
286 | */ |
287 | static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt) |
288 | { |
289 | u32 offset, i; |
290 | struct rvt_qpn_map *map; |
291 | int ret = 0; |
292 | |
293 | if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start)) |
294 | return -EINVAL; |
295 | |
296 | spin_lock_init(&qpt->lock); |
297 | |
298 | qpt->last = rdi->dparms.qpn_start; |
299 | qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift; |
300 | |
301 | /* |
	 * Drivers may want some QPs beyond what is needed for verbs; let them
	 * use our QPN table rather than keeping a second one. Go ahead and
	 * mark the bitmaps for those QPNs here. The reserved range must be
	 * *after* the range which verbs will pick from.
306 | */ |
307 | |
308 | /* Figure out number of bit maps needed before reserved range */ |
309 | qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE; |
310 | |
311 | /* This should always be zero */ |
312 | offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK; |
313 | |
314 | /* Starting with the first reserved bit map */ |
315 | map = &qpt->map[qpt->nmaps]; |
316 | |
	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
318 | rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end); |
319 | for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) { |
320 | if (!map->page) { |
321 | get_map_page(qpt, map); |
322 | if (!map->page) { |
323 | ret = -ENOMEM; |
324 | break; |
325 | } |
326 | } |
		set_bit(offset, map->page);
328 | offset++; |
329 | if (offset == RVT_BITS_PER_PAGE) { |
330 | /* next page */ |
331 | qpt->nmaps++; |
332 | map++; |
333 | offset = 0; |
334 | } |
335 | } |
336 | return ret; |
337 | } |
338 | |
339 | /** |
340 | * free_qpn_table - free the QP number table for a device |
341 | * @qpt: the QPN table |
342 | */ |
343 | static void free_qpn_table(struct rvt_qpn_table *qpt) |
344 | { |
345 | int i; |
346 | |
347 | for (i = 0; i < ARRAY_SIZE(qpt->map); i++) |
348 | free_page((unsigned long)qpt->map[i].page); |
349 | } |
350 | |
351 | /** |
352 | * rvt_driver_qp_init - Init driver qp resources |
 * @rdi: rvt dev structure
354 | * |
355 | * Return: 0 on success |
356 | */ |
357 | int rvt_driver_qp_init(struct rvt_dev_info *rdi) |
358 | { |
359 | int i; |
360 | int ret = -ENOMEM; |
361 | |
362 | if (!rdi->dparms.qp_table_size) |
363 | return -EINVAL; |
364 | |
365 | /* |
366 | * If driver is not doing any QP allocation then make sure it is |
367 | * providing the necessary QP functions. |
368 | */ |
369 | if (!rdi->driver_f.free_all_qps || |
370 | !rdi->driver_f.qp_priv_alloc || |
371 | !rdi->driver_f.qp_priv_free || |
372 | !rdi->driver_f.notify_qp_reset || |
373 | !rdi->driver_f.notify_restart_rc) |
374 | return -EINVAL; |
375 | |
376 | /* allocate parent object */ |
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
379 | if (!rdi->qp_dev) |
380 | return -ENOMEM; |
381 | |
382 | /* allocate hash table */ |
383 | rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size; |
384 | rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size); |
385 | rdi->qp_dev->qp_table = |
		kmalloc_array_node(rdi->qp_dev->qp_table_size,
				   sizeof(*rdi->qp_dev->qp_table),
				   GFP_KERNEL, rdi->dparms.node);
389 | if (!rdi->qp_dev->qp_table) |
390 | goto no_qp_table; |
391 | |
392 | for (i = 0; i < rdi->qp_dev->qp_table_size; i++) |
393 | RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL); |
394 | |
395 | spin_lock_init(&rdi->qp_dev->qpt_lock); |
396 | |
397 | /* initialize qpn map */ |
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
399 | goto fail_table; |
400 | |
401 | spin_lock_init(&rdi->n_qps_lock); |
402 | |
403 | return 0; |
404 | |
405 | fail_table: |
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);
411 | |
412 | return ret; |
413 | } |
414 | |
415 | /** |
416 | * rvt_free_qp_cb - callback function to reset a qp |
417 | * @qp: the qp to reset |
418 | * @v: a 64-bit value |
419 | * |
420 | * This function resets the qp and removes it from the |
421 | * qp hash table. |
422 | */ |
423 | static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v) |
424 | { |
425 | unsigned int *qp_inuse = (unsigned int *)v; |
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* Reset the qp and remove it from the qp hash list */
	rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
430 | |
431 | /* Increment the qp_inuse count */ |
432 | (*qp_inuse)++; |
433 | } |
434 | |
435 | /** |
436 | * rvt_free_all_qps - check for QPs still in use |
437 | * @rdi: rvt device info structure |
438 | * |
439 | * There should not be any QPs still in use. |
440 | * Free memory for table. |
441 | * Return the number of QPs still in use. |
442 | */ |
443 | static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi) |
444 | { |
445 | unsigned int qp_inuse = 0; |
446 | |
447 | qp_inuse += rvt_mcast_tree_empty(rdi); |
448 | |
	rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
450 | |
451 | return qp_inuse; |
452 | } |
453 | |
454 | /** |
455 | * rvt_qp_exit - clean up qps on device exit |
456 | * @rdi: rvt dev structure |
457 | * |
458 | * Check for qp leaks and free resources. |
459 | */ |
460 | void rvt_qp_exit(struct rvt_dev_info *rdi) |
461 | { |
462 | u32 qps_inuse = rvt_free_all_qps(rdi); |
463 | |
464 | if (qps_inuse) |
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
471 | } |
472 | |
473 | static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, |
474 | struct rvt_qpn_map *map, unsigned off) |
475 | { |
476 | return (map - qpt->map) * RVT_BITS_PER_PAGE + off; |
477 | } |
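/*
 * Worked example (assuming 4 KiB pages, so RVT_BITS_PER_PAGE == 32768):
 * bit 5 of qpt->map[1] corresponds to QPN 1 * 32768 + 5 = 32773.
 */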
478 | |
479 | /** |
480 | * alloc_qpn - Allocate the next available qpn or zero/one for QP type |
481 | * IB_QPT_SMI/IB_QPT_GSI |
482 | * @rdi: rvt device info structure |
483 | * @qpt: queue pair number table pointer |
484 | * @type: the QP type |
485 | * @port_num: IB port number, 1 based, comes from core |
486 | * @exclude_prefix: prefix of special queue pair number being allocated |
487 | * |
488 | * Return: The queue pair number |
489 | */ |
490 | static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, |
491 | enum ib_qp_type type, u8 port_num, u8 exclude_prefix) |
492 | { |
493 | u32 i, offset, max_scan, qpn; |
494 | struct rvt_qpn_map *map; |
495 | u32 ret; |
496 | u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ? |
497 | RVT_AIP_QPN_MAX : RVT_QPN_MAX; |
498 | |
499 | if (rdi->driver_f.alloc_qpn) |
500 | return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num); |
501 | |
502 | if (type == IB_QPT_SMI || type == IB_QPT_GSI) { |
503 | unsigned n; |
504 | |
505 | ret = type == IB_QPT_GSI; |
506 | n = 1 << (ret + 2 * (port_num - 1)); |
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
513 | goto bail; |
514 | } |
515 | |
516 | qpn = qpt->last + qpt->incr; |
517 | if (qpn >= max_qpn) |
518 | qpn = qpt->incr | ((qpt->last & 1) ^ 1); |
519 | /* offset carries bit 0 */ |
520 | offset = qpn & RVT_BITS_PER_PAGE_MASK; |
521 | map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; |
522 | max_scan = qpt->nmaps - !offset; |
523 | for (i = 0;;) { |
524 | if (unlikely(!map->page)) { |
525 | get_map_page(qpt, map); |
526 | if (unlikely(!map->page)) |
527 | break; |
528 | } |
529 | do { |
			if (!test_and_set_bit(offset, map->page)) {
531 | qpt->last = qpn; |
532 | ret = qpn; |
533 | goto bail; |
534 | } |
535 | offset += qpt->incr; |
536 | /* |
			 * This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
			 * That is OK. It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
541 | } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX); |
542 | /* |
543 | * In order to keep the number of pages allocated to a |
		 * minimum, we scan all the existing pages before increasing
545 | * the size of the bitmap table. |
546 | */ |
547 | if (++i > max_scan) { |
548 | if (qpt->nmaps == RVT_QPNMAP_ENTRIES) |
549 | break; |
550 | map = &qpt->map[qpt->nmaps++]; |
551 | /* start at incr with current bit 0 */ |
552 | offset = qpt->incr | (offset & 1); |
553 | } else if (map < &qpt->map[qpt->nmaps]) { |
554 | ++map; |
555 | /* start at incr with current bit 0 */ |
556 | offset = qpt->incr | (offset & 1); |
557 | } else { |
558 | map = &qpt->map[0]; |
559 | /* wrap to first map page, invert bit 0 */ |
560 | offset = qpt->incr | ((offset & 1) ^ 1); |
561 | } |
562 | /* there can be no set bits in low-order QoS bits */ |
563 | WARN_ON(rdi->dparms.qos_shift > 1 && |
564 | offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1)); |
		qpn = mk_qpn(qpt, map, offset);
566 | } |
567 | |
568 | ret = -ENOMEM; |
569 | |
570 | bail: |
571 | return ret; |
572 | } |
573 | |
574 | /** |
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: Whether to also clear the send side references
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);
588 | |
589 | if (clr_sends) { |
590 | while (qp->s_last != qp->s_head) { |
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
592 | |
593 | rvt_put_qp_swqe(qp, wqe); |
594 | if (++qp->s_last >= qp->s_size) |
595 | qp->s_last = 0; |
596 | smp_wmb(); /* see qp_set_savail */ |
597 | } |
598 | if (qp->s_rdma_mr) { |
			rvt_put_mr(qp->s_rdma_mr);
600 | qp->s_rdma_mr = NULL; |
601 | } |
602 | } |
603 | |
604 | for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) { |
605 | struct rvt_ack_entry *e = &qp->s_ack_queue[n]; |
606 | |
607 | if (e->rdma_sge.mr) { |
			rvt_put_mr(e->rdma_sge.mr);
609 | e->rdma_sge.mr = NULL; |
610 | } |
611 | } |
612 | } |
613 | |
614 | /** |
615 | * rvt_swqe_has_lkey - return true if lkey is used by swqe |
616 | * @wqe: the send wqe |
617 | * @lkey: the lkey |
618 | * |
619 | * Test the swqe for using lkey |
620 | */ |
621 | static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey) |
622 | { |
623 | int i; |
624 | |
625 | for (i = 0; i < wqe->wr.num_sge; i++) { |
626 | struct rvt_sge *sge = &wqe->sg_list[i]; |
627 | |
		if (rvt_mr_has_lkey(sge->mr, lkey))
629 | return true; |
630 | } |
631 | return false; |
632 | } |
633 | |
634 | /** |
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
636 | * @qp: the rvt_qp |
637 | * @lkey: the lkey |
638 | */ |
639 | static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey) |
640 | { |
641 | u32 s_last = qp->s_last; |
642 | |
643 | while (s_last != qp->s_head) { |
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
645 | |
646 | if (rvt_swqe_has_lkey(wqe, lkey)) |
647 | return true; |
648 | |
649 | if (++s_last >= qp->s_size) |
650 | s_last = 0; |
651 | } |
652 | if (qp->s_rdma_mr) |
		if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
654 | return true; |
655 | return false; |
656 | } |
657 | |
658 | /** |
659 | * rvt_qp_acks_has_lkey - return true if acks have lkey |
660 | * @qp: the qp |
661 | * @lkey: the lkey |
662 | */ |
663 | static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey) |
664 | { |
665 | int i; |
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
667 | |
668 | for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) { |
669 | struct rvt_ack_entry *e = &qp->s_ack_queue[i]; |
670 | |
		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
672 | return true; |
673 | } |
674 | return false; |
675 | } |
676 | |
677 | /** |
678 | * rvt_qp_mr_clean - clean up remote ops for lkey |
679 | * @qp: the qp |
680 | * @lkey: the lkey that is being de-registered |
681 | * |
682 | * This routine checks if the lkey is being used by |
683 | * the qp. |
684 | * |
 * If so, the qp is put into an error state to eliminate
686 | * any references from the qp. |
687 | */ |
688 | void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey) |
689 | { |
690 | bool lastwqe = false; |
691 | |
692 | if (qp->ibqp.qp_type == IB_QPT_SMI || |
693 | qp->ibqp.qp_type == IB_QPT_GSI) |
694 | /* avoid special QPs */ |
695 | return; |
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
699 | |
700 | if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) |
701 | goto check_lwqe; |
702 | |
	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
711 | if (lastwqe) { |
712 | struct ib_event ev; |
713 | |
714 | ev.device = qp->ibqp.device; |
715 | ev.element.qp = &qp->ibqp; |
716 | ev.event = IB_EVENT_QP_LAST_WQE_REACHED; |
717 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); |
718 | } |
719 | } |
720 | |
721 | /** |
 * rvt_remove_qp - remove qp from table
723 | * @rdi: rvt dev struct |
724 | * @qp: qp to remove |
725 | * |
726 | * Remove the QP from the table so it can't be found asynchronously by |
727 | * the receive routine. |
728 | */ |
729 | static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) |
730 | { |
731 | struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1]; |
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
733 | unsigned long flags; |
734 | int removed = 1; |
735 | |
736 | spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags); |
737 | |
738 | if (rcu_dereference_protected(rvp->qp[0], |
739 | lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) { |
740 | RCU_INIT_POINTER(rvp->qp[0], NULL); |
741 | } else if (rcu_dereference_protected(rvp->qp[1], |
742 | lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) { |
743 | RCU_INIT_POINTER(rvp->qp[1], NULL); |
744 | } else { |
745 | struct rvt_qp *q; |
746 | struct rvt_qp __rcu **qpp; |
747 | |
748 | removed = 0; |
749 | qpp = &rdi->qp_dev->qp_table[n]; |
750 | for (; (q = rcu_dereference_protected(*qpp, |
751 | lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL; |
752 | qpp = &q->next) { |
753 | if (q == qp) { |
754 | RCU_INIT_POINTER(*qpp, |
755 | rcu_dereference_protected(qp->next, |
756 | lockdep_is_held(&rdi->qp_dev->qpt_lock))); |
757 | removed = 1; |
				trace_rvt_qpremove(qp, n);
759 | break; |
760 | } |
761 | } |
762 | } |
763 | |
	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
765 | if (removed) { |
766 | synchronize_rcu(); |
767 | rvt_put_qp(qp); |
768 | } |
769 | } |
770 | |
771 | /** |
772 | * rvt_alloc_rq - allocate memory for user or kernel buffer |
773 | * @rq: receive queue data structure |
774 | * @size: number of request queue entries |
775 | * @node: The NUMA node |
 * @udata: True if user data is available, false otherwise
 *
 * Return: 0 on success, -ENOMEM if memory allocation failed.
779 | * This function is used by both shared receive |
780 | * queues and non-shared receive queues to allocate |
781 | * memory. |
782 | */ |
783 | int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node, |
784 | struct ib_udata *udata) |
785 | { |
786 | if (udata) { |
		rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
788 | if (!rq->wq) |
789 | goto bail; |
790 | /* need kwq with no buffers */ |
		rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
792 | if (!rq->kwq) |
793 | goto bail; |
794 | rq->kwq->curr_wq = rq->wq->wq; |
795 | } else { |
796 | /* need kwq with buffers */ |
797 | rq->kwq = |
			vzalloc_node(sizeof(struct rvt_krwq) + size, node);
799 | if (!rq->kwq) |
800 | goto bail; |
801 | rq->kwq->curr_wq = rq->kwq->wq; |
802 | } |
803 | |
804 | spin_lock_init(&rq->kwq->p_lock); |
805 | spin_lock_init(&rq->kwq->c_lock); |
806 | return 0; |
807 | bail: |
808 | rvt_free_rq(rq); |
809 | return -ENOMEM; |
810 | } |
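/*
 * Note on the two layouts built above: for a user QP the rvt_rwq ring sits
 * in a vmalloc_user() buffer so it can later be mmapped to user space (see
 * the qp->ip handling in rvt_create_qp()), and the small kwq only carries
 * kernel-side bookkeeping, with curr_wq pointing into that user ring. For a
 * kernel QP everything, including the WQE storage, lives in the single
 * vzalloc'd rvt_krwq.
 */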
811 | |
812 | /** |
813 | * rvt_init_qp - initialize the QP state to the reset state |
814 | * @rdi: rvt dev struct |
815 | * @qp: the QP to init or reinit |
816 | * @type: the QP type |
817 | * |
818 | * This function is called from both rvt_create_qp() and |
 * rvt_reset_qp(). The difference is that the reset path
 * takes the necessary locks to protect against concurrent
 * access.
822 | */ |
823 | static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
824 | enum ib_qp_type type) |
825 | { |
826 | qp->remote_qpn = 0; |
827 | qp->qkey = 0; |
828 | qp->qp_access_flags = 0; |
829 | qp->s_flags &= RVT_S_SIGNAL_REQ_WR; |
830 | qp->s_hdrwords = 0; |
831 | qp->s_wqe = NULL; |
832 | qp->s_draining = 0; |
833 | qp->s_next_psn = 0; |
834 | qp->s_last_psn = 0; |
835 | qp->s_sending_psn = 0; |
836 | qp->s_sending_hpsn = 0; |
837 | qp->s_psn = 0; |
838 | qp->r_psn = 0; |
839 | qp->r_msn = 0; |
840 | if (type == IB_QPT_RC) { |
841 | qp->s_state = IB_OPCODE_RC_SEND_LAST; |
842 | qp->r_state = IB_OPCODE_RC_SEND_LAST; |
843 | } else { |
844 | qp->s_state = IB_OPCODE_UC_SEND_LAST; |
845 | qp->r_state = IB_OPCODE_UC_SEND_LAST; |
846 | } |
847 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; |
848 | qp->r_nak_state = 0; |
849 | qp->r_aflags = 0; |
850 | qp->r_flags = 0; |
851 | qp->s_head = 0; |
852 | qp->s_tail = 0; |
853 | qp->s_cur = 0; |
854 | qp->s_acked = 0; |
855 | qp->s_last = 0; |
856 | qp->s_ssn = 1; |
857 | qp->s_lsn = 0; |
858 | qp->s_mig_state = IB_MIG_MIGRATED; |
859 | qp->r_head_ack_queue = 0; |
860 | qp->s_tail_ack_queue = 0; |
861 | qp->s_acked_ack_queue = 0; |
862 | qp->s_num_rd_atomic = 0; |
863 | qp->r_sge.num_sge = 0; |
	atomic_set(&qp->s_reserved_used, 0);
865 | } |
866 | |
867 | /** |
868 | * _rvt_reset_qp - initialize the QP state to the reset state |
869 | * @rdi: rvt dev struct |
870 | * @qp: the QP to reset |
871 | * @type: the QP type |
872 | * |
873 | * r_lock, s_hlock, and s_lock are required to be held by the caller |
874 | */ |
875 | static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
876 | enum ib_qp_type type) |
877 | __must_hold(&qp->s_lock) |
878 | __must_hold(&qp->s_hlock) |
879 | __must_hold(&qp->r_lock) |
880 | { |
881 | lockdep_assert_held(&qp->r_lock); |
882 | lockdep_assert_held(&qp->s_hlock); |
883 | lockdep_assert_held(&qp->s_lock); |
884 | if (qp->state != IB_QPS_RESET) { |
885 | qp->state = IB_QPS_RESET; |
886 | |
887 | /* Let drivers flush their waitlist */ |
888 | rdi->driver_f.flush_qp_waiters(qp); |
889 | rvt_stop_rc_timers(qp); |
890 | qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT); |
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);
894 | |
895 | /* Stop the send queue and the retry timer */ |
896 | rdi->driver_f.stop_send_queue(qp); |
897 | rvt_del_timers_sync(qp); |
898 | /* Wait for things to stop */ |
899 | rdi->driver_f.quiesce_qp(qp); |
900 | |
901 | /* take qp out the hash and wait for it to be unused */ |
902 | rvt_remove_qp(rdi, qp); |
903 | |
904 | /* grab the lock b/c it was locked at call time */ |
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
910 | /* |
911 | * Let the driver do any tear down or re-init it needs to for |
912 | * a qp that has been reset |
913 | */ |
914 | rdi->driver_f.notify_qp_reset(qp); |
915 | } |
916 | rvt_init_qp(rdi, qp, type); |
917 | lockdep_assert_held(&qp->r_lock); |
918 | lockdep_assert_held(&qp->s_hlock); |
919 | lockdep_assert_held(&qp->s_lock); |
920 | } |
921 | |
922 | /** |
923 | * rvt_reset_qp - initialize the QP state to the reset state |
924 | * @rdi: the device info |
925 | * @qp: the QP to reset |
926 | * @type: the QP type |
927 | * |
928 | * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock |
929 | * before calling _rvt_reset_qp(). |
930 | */ |
931 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
932 | enum ib_qp_type type) |
933 | { |
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	_rvt_reset_qp(rdi, qp, type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
941 | } |
942 | |
943 | /** |
944 | * rvt_free_qpn - Free a qpn from the bit map |
945 | * @qpt: QP table |
946 | * @qpn: queue pair number to free |
947 | */ |
948 | static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn) |
949 | { |
950 | struct rvt_qpn_map *map; |
951 | |
952 | if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE) |
953 | qpn &= RVT_AIP_QP_SUFFIX; |
954 | |
955 | map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE; |
956 | if (map->page) |
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
958 | } |
959 | |
960 | /** |
961 | * get_allowed_ops - Given a QP type return the appropriate allowed OP |
962 | * @type: valid, supported, QP type |
963 | */ |
964 | static u8 get_allowed_ops(enum ib_qp_type type) |
965 | { |
966 | return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ? |
967 | IB_OPCODE_UC : IB_OPCODE_UD; |
968 | } |
969 | |
970 | /** |
971 | * free_ud_wq_attr - Clean up AH attribute cache for UD QPs |
972 | * @qp: Valid QP with allowed_ops set |
973 | * |
974 | * The rvt_swqe data structure being used is a union, so this is |
975 | * only valid for UD QPs. |
976 | */ |
977 | static void free_ud_wq_attr(struct rvt_qp *qp) |
978 | { |
979 | struct rvt_swqe *wqe; |
980 | int i; |
981 | |
982 | for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) { |
		wqe = rvt_get_swqe_ptr(qp, i);
		kfree(wqe->ud_wr.attr);
985 | wqe->ud_wr.attr = NULL; |
986 | } |
987 | } |
988 | |
989 | /** |
990 | * alloc_ud_wq_attr - AH attribute cache for UD QPs |
991 | * @qp: Valid QP with allowed_ops set |
992 | * @node: Numa node for allocation |
993 | * |
994 | * The rvt_swqe data structure being used is a union, so this is |
995 | * only valid for UD QPs. |
996 | */ |
997 | static int alloc_ud_wq_attr(struct rvt_qp *qp, int node) |
998 | { |
999 | struct rvt_swqe *wqe; |
1000 | int i; |
1001 | |
1002 | for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) { |
		wqe = rvt_get_swqe_ptr(qp, i);
		wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
					       GFP_KERNEL, node);
1006 | if (!wqe->ud_wr.attr) { |
1007 | free_ud_wq_attr(qp); |
1008 | return -ENOMEM; |
1009 | } |
1010 | } |
1011 | |
1012 | return 0; |
1013 | } |
1014 | |
1015 | /** |
1016 | * rvt_create_qp - create a queue pair for a device |
1017 | * @ibqp: the queue pair |
1018 | * @init_attr: the attributes of the queue pair |
1019 | * @udata: user data for libibverbs.so |
1020 | * |
1021 | * Queue pair creation is mostly an rvt issue. However, drivers have their own |
1022 | * unique idea of what queue pair numbers mean. For instance there is a reserved |
1023 | * range for PSM. |
1024 | * |
1025 | * Return: 0 on success, otherwise returns an errno. |
1026 | * |
1027 | * Called by the ib_create_qp() core verbs function. |
1028 | */ |
1029 | int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, |
1030 | struct ib_udata *udata) |
1031 | { |
1032 | struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); |
1033 | int ret = -ENOMEM; |
1034 | struct rvt_swqe *swq = NULL; |
1035 | size_t sz; |
1036 | size_t sg_list_sz = 0; |
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1038 | void *priv = NULL; |
1039 | size_t sqsize; |
1040 | u8 exclude_prefix = 0; |
1041 | |
1042 | if (!rdi) |
1043 | return -EINVAL; |
1044 | |
1045 | if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE) |
1046 | return -EOPNOTSUPP; |
1047 | |
1048 | if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge || |
1049 | init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr) |
1050 | return -EINVAL; |
1051 | |
1052 | /* Check receive queue parameters if no SRQ is specified. */ |
1053 | if (!init_attr->srq) { |
1054 | if (init_attr->cap.max_recv_sge > |
1055 | rdi->dparms.props.max_recv_sge || |
1056 | init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr) |
1057 | return -EINVAL; |
1058 | |
1059 | if (init_attr->cap.max_send_sge + |
1060 | init_attr->cap.max_send_wr + |
1061 | init_attr->cap.max_recv_sge + |
1062 | init_attr->cap.max_recv_wr == 0) |
1063 | return -EINVAL; |
1064 | } |
1065 | sqsize = |
1066 | init_attr->cap.max_send_wr + 1 + |
1067 | rdi->dparms.reserved_operations; |
1068 | switch (init_attr->qp_type) { |
1069 | case IB_QPT_SMI: |
1070 | case IB_QPT_GSI: |
1071 | if (init_attr->port_num == 0 || |
1072 | init_attr->port_num > ibqp->device->phys_port_cnt) |
1073 | return -EINVAL; |
1074 | fallthrough; |
1075 | case IB_QPT_UC: |
1076 | case IB_QPT_RC: |
1077 | case IB_QPT_UD: |
1078 | sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge); |
		swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
1080 | if (!swq) |
1081 | return -ENOMEM; |
1082 | |
1083 | if (init_attr->srq) { |
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
1085 | |
1086 | if (srq->rq.max_sge > 1) |
1087 | sg_list_sz = sizeof(*qp->r_sg_list) * |
1088 | (srq->rq.max_sge - 1); |
1089 | } else if (init_attr->cap.max_recv_sge > 1) |
1090 | sg_list_sz = sizeof(*qp->r_sg_list) * |
1091 | (init_attr->cap.max_recv_sge - 1); |
1092 | qp->r_sg_list = |
			kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
		if (!qp->r_sg_list)
			goto bail_qp;
		qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
1097 | |
1098 | RCU_INIT_POINTER(qp->next, NULL); |
1099 | if (init_attr->qp_type == IB_QPT_RC) { |
1100 | qp->s_ack_queue = |
				kcalloc_node(rvt_max_atomic(rdi),
					     sizeof(*qp->s_ack_queue),
					     GFP_KERNEL,
					     rdi->dparms.node);
1105 | if (!qp->s_ack_queue) |
1106 | goto bail_qp; |
1107 | } |
1108 | /* initialize timers needed for rc qp */ |
1109 | timer_setup(&qp->s_timer, rvt_rc_timeout, 0); |
		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
1112 | qp->s_rnr_timer.function = rvt_rc_rnr_retry; |
1113 | |
1114 | /* |
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
		if (IS_ERR(priv)) {
			ret = PTR_ERR(priv);
1121 | goto bail_qp; |
1122 | } |
1123 | qp->priv = priv; |
1124 | qp->timeout_jiffies = |
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1126 | 1000UL); |
1127 | if (init_attr->srq) { |
1128 | sz = 0; |
1129 | } else { |
1130 | qp->r_rq.size = init_attr->cap.max_recv_wr + 1; |
1131 | qp->r_rq.max_sge = init_attr->cap.max_recv_sge; |
1132 | sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + |
1133 | sizeof(struct rvt_rwqe); |
			ret = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
					   rdi->dparms.node, udata);
1136 | if (ret) |
1137 | goto bail_driver_priv; |
1138 | } |
1139 | |
1140 | /* |
1141 | * ib_create_qp() will initialize qp->ibqp |
1142 | * except for qp->ibqp.qp_num. |
1143 | */ |
1144 | spin_lock_init(&qp->r_lock); |
1145 | spin_lock_init(&qp->s_hlock); |
1146 | spin_lock_init(&qp->s_lock); |
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		INIT_LIST_HEAD(&qp->rspwait);
1151 | qp->state = IB_QPS_RESET; |
1152 | qp->s_wq = swq; |
1153 | qp->s_size = sqsize; |
1154 | qp->s_avail = init_attr->cap.max_send_wr; |
1155 | qp->s_max_sge = init_attr->cap.max_send_sge; |
1156 | if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) |
1157 | qp->s_flags = RVT_S_SIGNAL_REQ_WR; |
		ret = alloc_ud_wq_attr(qp, rdi->dparms.node);
1159 | if (ret) |
1160 | goto bail_rq_rvt; |
1161 | |
1162 | if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) |
1163 | exclude_prefix = RVT_AIP_QP_PREFIX; |
1164 | |
		ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num,
				exclude_prefix);
1169 | if (ret < 0) |
1170 | goto bail_rq_wq; |
1171 | |
1172 | qp->ibqp.qp_num = ret; |
1173 | if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) |
1174 | qp->ibqp.qp_num |= RVT_AIP_QP_BASE; |
1175 | qp->port_num = init_attr->port_num; |
		rvt_init_qp(rdi, qp, init_attr->qp_type);
1177 | if (rdi->driver_f.qp_priv_init) { |
1178 | ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr); |
1179 | if (ret) |
1180 | goto bail_rq_wq; |
1181 | } |
1182 | break; |
1183 | |
1184 | default: |
1185 | /* Don't support raw QPs */ |
1186 | return -EOPNOTSUPP; |
1187 | } |
1188 | |
1189 | init_attr->cap.max_inline_data = 0; |
1190 | |
1191 | /* |
1192 | * Return the address of the RWQ as the offset to mmap. |
1193 | * See rvt_mmap() for details. |
1194 | */ |
1195 | if (udata && udata->outlen >= sizeof(__u64)) { |
1196 | if (!qp->r_rq.wq) { |
1197 | __u64 offset = 0; |
1198 | |
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
1201 | if (ret) |
1202 | goto bail_qpn; |
1203 | } else { |
1204 | u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; |
1205 | |
			qp->ip = rvt_create_mmap_info(rdi, s, udata,
						      qp->r_rq.wq);
			if (IS_ERR(qp->ip)) {
				ret = PTR_ERR(qp->ip);
1210 | goto bail_qpn; |
1211 | } |
1212 | |
			ret = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
1215 | if (ret) |
1216 | goto bail_ip; |
1217 | } |
1218 | qp->pid = current->pid; |
1219 | } |
1220 | |
	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
1224 | ret = -ENOMEM; |
1225 | goto bail_ip; |
1226 | } |
1227 | |
1228 | rdi->n_qps_allocated++; |
1229 | /* |
1230 | * Maintain a busy_jiffies variable that will be added to the timeout |
1231 | * period in mod_retry_timer and add_retry_timer. This busy jiffies |
1232 | * is scaled by the number of rc qps created for the device to reduce |
1233 | * the number of timeouts occurring when there is a large number of |
1234 | * qps. busy_jiffies is incremented every rc qp scaling interval. |
1235 | * The scaling interval is selected based on extensive performance |
1236 | * evaluation of targeted workloads. |
1237 | */ |
1238 | if (init_attr->qp_type == IB_QPT_RC) { |
1239 | rdi->n_rc_qps++; |
1240 | rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL; |
1241 | } |
	spin_unlock(&rdi->n_qps_lock);
1243 | |
1244 | if (qp->ip) { |
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
1248 | } |
1249 | |
1250 | return 0; |
1251 | |
1252 | bail_ip: |
1253 | if (qp->ip) |
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1258 | |
1259 | bail_rq_wq: |
1260 | free_ud_wq_attr(qp); |
1261 | |
1262 | bail_rq_rvt: |
	rvt_free_rq(&qp->r_rq);
1264 | |
1265 | bail_driver_priv: |
1266 | rdi->driver_f.qp_priv_free(rdi, qp); |
1267 | |
1268 | bail_qp: |
	kfree(qp->s_ack_queue);
	kfree(qp->r_sg_list);
	vfree(swq);
1272 | return ret; |
1273 | } |
1274 | |
1275 | /** |
1276 | * rvt_error_qp - put a QP into the error state |
1277 | * @qp: the QP to put into the error state |
1278 | * @err: the receive completion error to signal if a RWQE is active |
1279 | * |
1280 | * Flushes both send and receive work queues. |
1281 | * |
1282 | * Return: true if last WQE event should be generated. |
1283 | * The QP r_lock and s_lock should be held and interrupts disabled. |
1284 | * If we are already in error state, just return. |
1285 | */ |
1286 | int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err) |
1287 | { |
1288 | struct ib_wc wc; |
1289 | int ret = 0; |
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1291 | |
1292 | lockdep_assert_held(&qp->r_lock); |
1293 | lockdep_assert_held(&qp->s_lock); |
1294 | if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) |
1295 | goto bail; |
1296 | |
1297 | qp->state = IB_QPS_ERR; |
1298 | |
1299 | if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { |
1300 | qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); |
		del_timer(&qp->s_timer);
1302 | } |
1303 | |
1304 | if (qp->s_flags & RVT_S_ANY_WAIT_SEND) |
1305 | qp->s_flags &= ~RVT_S_ANY_WAIT_SEND; |
1306 | |
1307 | rdi->driver_f.notify_error_qp(qp); |
1308 | |
1309 | /* Schedule the sending tasklet to drain the send work queue. */ |
1310 | if (READ_ONCE(qp->s_last) != qp->s_head) |
1311 | rdi->driver_f.schedule_send(qp); |
1312 | |
	rvt_clear_mr_refs(qp, 0);
1314 | |
1315 | memset(&wc, 0, sizeof(wc)); |
1316 | wc.qp = &qp->ibqp; |
1317 | wc.opcode = IB_WC_RECV; |
1318 | |
	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1323 | } |
1324 | wc.status = IB_WC_WR_FLUSH_ERR; |
1325 | |
1326 | if (qp->r_rq.kwq) { |
1327 | u32 head; |
1328 | u32 tail; |
1329 | struct rvt_rwq *wq = NULL; |
1330 | struct rvt_krwq *kwq = NULL; |
1331 | |
		spin_lock(&qp->r_rq.kwq->c_lock);
1333 | /* qp->ip used to validate if there is a user buffer mmaped */ |
1334 | if (qp->ip) { |
1335 | wq = qp->r_rq.wq; |
1336 | head = RDMA_READ_UAPI_ATOMIC(wq->head); |
1337 | tail = RDMA_READ_UAPI_ATOMIC(wq->tail); |
1338 | } else { |
1339 | kwq = qp->r_rq.kwq; |
1340 | head = kwq->head; |
1341 | tail = kwq->tail; |
1342 | } |
1343 | /* sanity check pointers before trusting them */ |
1344 | if (head >= qp->r_rq.size) |
1345 | head = 0; |
1346 | if (tail >= qp->r_rq.size) |
1347 | tail = 0; |
1348 | while (tail != head) { |
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1350 | if (++tail >= qp->r_rq.size) |
1351 | tail = 0; |
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1353 | } |
1354 | if (qp->ip) |
1355 | RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail); |
1356 | else |
1357 | kwq->tail = tail; |
		spin_unlock(&qp->r_rq.kwq->c_lock);
1359 | } else if (qp->ibqp.event_handler) { |
1360 | ret = 1; |
1361 | } |
1362 | |
1363 | bail: |
1364 | return ret; |
1365 | } |
1366 | EXPORT_SYMBOL(rvt_error_qp); |
1367 | |
1368 | /* |
1369 | * Put the QP into the hash table. |
1370 | * The hash table holds a reference to the QP. |
1371 | */ |
1372 | static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) |
1373 | { |
1374 | struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1]; |
1375 | unsigned long flags; |
1376 | |
1377 | rvt_get_qp(qp); |
1378 | spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags); |
1379 | |
1380 | if (qp->ibqp.qp_num <= 1) { |
1381 | rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp); |
1382 | } else { |
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1391 | } |
1392 | |
1393 | /** |
1394 | * rvt_modify_qp - modify the attributes of a queue pair |
 * @ibqp: the queue pair whose attributes we're modifying
1396 | * @attr: the new attributes |
1397 | * @attr_mask: the mask of attributes to modify |
1398 | * @udata: user data for libibverbs.so |
1399 | * |
1400 | * Return: 0 on success, otherwise returns an errno. |
1401 | */ |
1402 | int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
1403 | int attr_mask, struct ib_udata *udata) |
1404 | { |
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1406 | struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); |
1407 | enum ib_qp_state cur_state, new_state; |
1408 | struct ib_event ev; |
1409 | int lastwqe = 0; |
1410 | int mig = 0; |
1411 | int pmtu = 0; /* for gcc warning only */ |
1412 | int opa_ah; |
1413 | |
1414 | if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) |
1415 | return -EOPNOTSUPP; |
1416 | |
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
1428 | goto inval; |
1429 | |
1430 | if (rdi->driver_f.check_modify_qp && |
1431 | rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata)) |
1432 | goto inval; |
1433 | |
1434 | if (attr_mask & IB_QP_AV) { |
1435 | if (opa_ah) { |
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
1437 | opa_get_mcast_base(OPA_MCAST_NR)) |
1438 | goto inval; |
1439 | } else { |
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
1441 | be16_to_cpu(IB_MULTICAST_LID_BASE)) |
1442 | goto inval; |
1443 | } |
1444 | |
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1446 | goto inval; |
1447 | } |
1448 | |
1449 | if (attr_mask & IB_QP_ALT_PATH) { |
1450 | if (opa_ah) { |
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1452 | opa_get_mcast_base(OPA_MCAST_NR)) |
1453 | goto inval; |
1454 | } else { |
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1456 | be16_to_cpu(IB_MULTICAST_LID_BASE)) |
1457 | goto inval; |
1458 | } |
1459 | |
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1461 | goto inval; |
1462 | if (attr->alt_pkey_index >= rvt_get_npkeys(rdi)) |
1463 | goto inval; |
1464 | } |
1465 | |
1466 | if (attr_mask & IB_QP_PKEY_INDEX) |
1467 | if (attr->pkey_index >= rvt_get_npkeys(rdi)) |
1468 | goto inval; |
1469 | |
1470 | if (attr_mask & IB_QP_MIN_RNR_TIMER) |
1471 | if (attr->min_rnr_timer > 31) |
1472 | goto inval; |
1473 | |
1474 | if (attr_mask & IB_QP_PORT) |
1475 | if (qp->ibqp.qp_type == IB_QPT_SMI || |
1476 | qp->ibqp.qp_type == IB_QPT_GSI || |
1477 | attr->port_num == 0 || |
1478 | attr->port_num > ibqp->device->phys_port_cnt) |
1479 | goto inval; |
1480 | |
1481 | if (attr_mask & IB_QP_DEST_QPN) |
1482 | if (attr->dest_qp_num > RVT_QPN_MASK) |
1483 | goto inval; |
1484 | |
1485 | if (attr_mask & IB_QP_RETRY_CNT) |
1486 | if (attr->retry_cnt > 7) |
1487 | goto inval; |
1488 | |
1489 | if (attr_mask & IB_QP_RNR_RETRY) |
1490 | if (attr->rnr_retry > 7) |
1491 | goto inval; |
1492 | |
1493 | /* |
	 * Don't allow invalid path_mtu values. OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu). We'll set qp->path_mtu
1497 | * to the lesser of requested attribute mtu and active, |
1498 | * for packetizing messages. |
1499 | * Note that the QP port has to be set in INIT and MTU in RTR. |
1500 | */ |
1501 | if (attr_mask & IB_QP_PATH_MTU) { |
1502 | pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr); |
1503 | if (pmtu < 0) |
1504 | goto inval; |
1505 | } |
1506 | |
1507 | if (attr_mask & IB_QP_PATH_MIG_STATE) { |
1508 | if (attr->path_mig_state == IB_MIG_REARM) { |
1509 | if (qp->s_mig_state == IB_MIG_ARMED) |
1510 | goto inval; |
1511 | if (new_state != IB_QPS_RTS) |
1512 | goto inval; |
1513 | } else if (attr->path_mig_state == IB_MIG_MIGRATED) { |
1514 | if (qp->s_mig_state == IB_MIG_REARM) |
1515 | goto inval; |
1516 | if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD) |
1517 | goto inval; |
1518 | if (qp->s_mig_state == IB_MIG_ARMED) |
1519 | mig = 1; |
1520 | } else { |
1521 | goto inval; |
1522 | } |
1523 | } |
1524 | |
1525 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) |
1526 | if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic) |
1527 | goto inval; |
1528 | |
1529 | switch (new_state) { |
1530 | case IB_QPS_RESET: |
1531 | if (qp->state != IB_QPS_RESET) |
			_rvt_reset_qp(rdi, qp, ibqp->qp_type);
1533 | break; |
1534 | |
1535 | case IB_QPS_RTR: |
1536 | /* Allow event to re-trigger if QP set to RTR more than once */ |
1537 | qp->r_flags &= ~RVT_R_COMM_EST; |
1538 | qp->state = new_state; |
1539 | break; |
1540 | |
1541 | case IB_QPS_SQD: |
1542 | qp->s_draining = qp->s_last != qp->s_cur; |
1543 | qp->state = new_state; |
1544 | break; |
1545 | |
1546 | case IB_QPS_SQE: |
1547 | if (qp->ibqp.qp_type == IB_QPT_RC) |
1548 | goto inval; |
1549 | qp->state = new_state; |
1550 | break; |
1551 | |
1552 | case IB_QPS_ERR: |
1553 | lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); |
1554 | break; |
1555 | |
1556 | default: |
1557 | qp->state = new_state; |
1558 | break; |
1559 | } |
1560 | |
1561 | if (attr_mask & IB_QP_PKEY_INDEX) |
1562 | qp->s_pkey_index = attr->pkey_index; |
1563 | |
1564 | if (attr_mask & IB_QP_PORT) |
1565 | qp->port_num = attr->port_num; |
1566 | |
1567 | if (attr_mask & IB_QP_DEST_QPN) |
1568 | qp->remote_qpn = attr->dest_qp_num; |
1569 | |
1570 | if (attr_mask & IB_QP_SQ_PSN) { |
1571 | qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask; |
1572 | qp->s_psn = qp->s_next_psn; |
1573 | qp->s_sending_psn = qp->s_next_psn; |
1574 | qp->s_last_psn = qp->s_next_psn - 1; |
1575 | qp->s_sending_hpsn = qp->s_last_psn; |
1576 | } |
1577 | |
1578 | if (attr_mask & IB_QP_RQ_PSN) |
1579 | qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask; |
1580 | |
1581 | if (attr_mask & IB_QP_ACCESS_FLAGS) |
1582 | qp->qp_access_flags = attr->qp_access_flags; |
1583 | |
1584 | if (attr_mask & IB_QP_AV) { |
		rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
		qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1588 | } |
1589 | |
1590 | if (attr_mask & IB_QP_ALT_PATH) { |
		rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1592 | qp->s_alt_pkey_index = attr->alt_pkey_index; |
1593 | } |
1594 | |
1595 | if (attr_mask & IB_QP_PATH_MIG_STATE) { |
1596 | qp->s_mig_state = attr->path_mig_state; |
1597 | if (mig) { |
1598 | qp->remote_ah_attr = qp->alt_ah_attr; |
			qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1600 | qp->s_pkey_index = qp->s_alt_pkey_index; |
1601 | } |
1602 | } |
1603 | |
1604 | if (attr_mask & IB_QP_PATH_MTU) { |
1605 | qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu); |
1606 | qp->log_pmtu = ilog2(qp->pmtu); |
1607 | } |
1608 | |
1609 | if (attr_mask & IB_QP_RETRY_CNT) { |
1610 | qp->s_retry_cnt = attr->retry_cnt; |
1611 | qp->s_retry = attr->retry_cnt; |
1612 | } |
1613 | |
1614 | if (attr_mask & IB_QP_RNR_RETRY) { |
1615 | qp->s_rnr_retry_cnt = attr->rnr_retry; |
1616 | qp->s_rnr_retry = attr->rnr_retry; |
1617 | } |
1618 | |
1619 | if (attr_mask & IB_QP_MIN_RNR_TIMER) |
1620 | qp->r_min_rnr_timer = attr->min_rnr_timer; |
1621 | |
1622 | if (attr_mask & IB_QP_TIMEOUT) { |
1623 | qp->timeout = attr->timeout; |
		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1625 | } |
1626 | |
1627 | if (attr_mask & IB_QP_QKEY) |
1628 | qp->qkey = attr->qkey; |
1629 | |
1630 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) |
1631 | qp->r_max_rd_atomic = attr->max_dest_rd_atomic; |
1632 | |
1633 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) |
1634 | qp->s_max_rd_atomic = attr->max_rd_atomic; |
1635 | |
1636 | if (rdi->driver_f.modify_qp) |
1637 | rdi->driver_f.modify_qp(qp, attr, attr_mask, udata); |
1638 | |
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
1642 | |
1643 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) |
1644 | rvt_insert_qp(rdi, qp); |
1645 | |
1646 | if (lastwqe) { |
1647 | ev.device = qp->ibqp.device; |
1648 | ev.element.qp = &qp->ibqp; |
1649 | ev.event = IB_EVENT_QP_LAST_WQE_REACHED; |
1650 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); |
1651 | } |
1652 | if (mig) { |
1653 | ev.device = qp->ibqp.device; |
1654 | ev.element.qp = &qp->ibqp; |
1655 | ev.event = IB_EVENT_PATH_MIG; |
1656 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); |
1657 | } |
1658 | return 0; |
1659 | |
1660 | inval: |
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
1664 | return -EINVAL; |
1665 | } |
1666 | |
1667 | /** |
1668 | * rvt_destroy_qp - destroy a queue pair |
1669 | * @ibqp: the queue pair to destroy |
1670 | * @udata: unused by the driver |
1671 | * |
1672 | * Note that this can be called while the QP is actively sending or |
1673 | * receiving! |
1674 | * |
1675 | * Return: 0 on success. |
1676 | */ |
1677 | int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
1678 | { |
1679 | struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); |
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	rvt_reset_qp(rdi, qp, ibqp->qp_type);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
1689 | rdi->n_qps_allocated--; |
1690 | if (qp->ibqp.qp_type == IB_QPT_RC) { |
1691 | rdi->n_rc_qps--; |
1692 | rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL; |
1693 | } |
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	kvfree(qp->r_rq.kwq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	kfree(qp->r_sg_list);
	rdma_destroy_ah_attr(&qp->remote_ah_attr);
	rdma_destroy_ah_attr(&qp->alt_ah_attr);
	free_ud_wq_attr(qp);
	vfree(qp->s_wq);
1706 | return 0; |
1707 | } |
1708 | |
1709 | /** |
 * rvt_query_qp - query an ibqp
1711 | * @ibqp: IB qp to query |
1712 | * @attr: attr struct to fill in |
1713 | * @attr_mask: attr mask ignored |
1714 | * @init_attr: struct to fill in |
1715 | * |
1716 | * Return: always 0 |
1717 | */ |
1718 | int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
1719 | int attr_mask, struct ib_qp_init_attr *init_attr) |
1720 | { |
1721 | struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); |
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1723 | |
1724 | attr->qp_state = qp->state; |
1725 | attr->cur_qp_state = attr->qp_state; |
1726 | attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu); |
1727 | attr->path_mig_state = qp->s_mig_state; |
1728 | attr->qkey = qp->qkey; |
1729 | attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask; |
1730 | attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask; |
1731 | attr->dest_qp_num = qp->remote_qpn; |
1732 | attr->qp_access_flags = qp->qp_access_flags; |
1733 | attr->cap.max_send_wr = qp->s_size - 1 - |
1734 | rdi->dparms.reserved_operations; |
1735 | attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; |
1736 | attr->cap.max_send_sge = qp->s_max_sge; |
1737 | attr->cap.max_recv_sge = qp->r_rq.max_sge; |
1738 | attr->cap.max_inline_data = 0; |
1739 | attr->ah_attr = qp->remote_ah_attr; |
1740 | attr->alt_ah_attr = qp->alt_ah_attr; |
1741 | attr->pkey_index = qp->s_pkey_index; |
1742 | attr->alt_pkey_index = qp->s_alt_pkey_index; |
1743 | attr->en_sqd_async_notify = 0; |
1744 | attr->sq_draining = qp->s_draining; |
1745 | attr->max_rd_atomic = qp->s_max_rd_atomic; |
1746 | attr->max_dest_rd_atomic = qp->r_max_rd_atomic; |
1747 | attr->min_rnr_timer = qp->r_min_rnr_timer; |
1748 | attr->port_num = qp->port_num; |
1749 | attr->timeout = qp->timeout; |
1750 | attr->retry_cnt = qp->s_retry_cnt; |
1751 | attr->rnr_retry = qp->s_rnr_retry_cnt; |
1752 | attr->alt_port_num = |
rdma_ah_get_port_num(&qp->alt_ah_attr);
1754 | attr->alt_timeout = qp->alt_timeout; |
1755 | |
1756 | init_attr->event_handler = qp->ibqp.event_handler; |
1757 | init_attr->qp_context = qp->ibqp.qp_context; |
1758 | init_attr->send_cq = qp->ibqp.send_cq; |
1759 | init_attr->recv_cq = qp->ibqp.recv_cq; |
1760 | init_attr->srq = qp->ibqp.srq; |
1761 | init_attr->cap = attr->cap; |
1762 | if (qp->s_flags & RVT_S_SIGNAL_REQ_WR) |
1763 | init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; |
1764 | else |
1765 | init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; |
1766 | init_attr->qp_type = qp->ibqp.qp_type; |
1767 | init_attr->port_num = qp->port_num; |
1768 | return 0; |
1769 | } |
1770 | |
1771 | /** |
1772 | * rvt_post_recv - post a receive on a QP |
1773 | * @ibqp: the QP to post the receive on |
1774 | * @wr: the WR to post |
1775 | * @bad_wr: the first bad WR is put here |
1776 | * |
1777 | * This may be called from interrupt context. |
1778 | * |
1779 | * Return: 0 on success otherwise errno |
1780 | */ |
1781 | int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, |
1782 | const struct ib_recv_wr **bad_wr) |
1783 | { |
1784 | struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); |
1785 | struct rvt_krwq *wq = qp->r_rq.kwq; |
1786 | unsigned long flags; |
1787 | int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) && |
1788 | !qp->ibqp.srq; |
1789 | |
1790 | /* Check that state is OK to post receive. */ |
1791 | if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) { |
1792 | *bad_wr = wr; |
1793 | return -EINVAL; |
1794 | } |
1795 | |
1796 | for (; wr; wr = wr->next) { |
1797 | struct rvt_rwqe *wqe; |
1798 | u32 next; |
1799 | int i; |
1800 | |
1801 | if ((unsigned)wr->num_sge > qp->r_rq.max_sge) { |
1802 | *bad_wr = wr; |
1803 | return -EINVAL; |
1804 | } |
1805 | |
1806 | spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags); |
1807 | next = wq->head + 1; |
1808 | if (next >= qp->r_rq.size) |
1809 | next = 0; |
1810 | if (next == READ_ONCE(wq->tail)) { |
spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1812 | *bad_wr = wr; |
1813 | return -ENOMEM; |
1814 | } |
1815 | if (unlikely(qp_err_flush)) { |
1816 | struct ib_wc wc; |
1817 | |
1818 | memset(&wc, 0, sizeof(wc)); |
1819 | wc.qp = &qp->ibqp; |
1820 | wc.opcode = IB_WC_RECV; |
1821 | wc.wr_id = wr->wr_id; |
1822 | wc.status = IB_WC_WR_FLUSH_ERR; |
rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
} else {
wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1826 | wqe->wr_id = wr->wr_id; |
1827 | wqe->num_sge = wr->num_sge; |
1828 | for (i = 0; i < wr->num_sge; i++) { |
1829 | wqe->sg_list[i].addr = wr->sg_list[i].addr; |
1830 | wqe->sg_list[i].length = wr->sg_list[i].length; |
1831 | wqe->sg_list[i].lkey = wr->sg_list[i].lkey; |
1832 | } |
1833 | /* |
1834 | * Make sure queue entry is written |
1835 | * before the head index. |
1836 | */ |
1837 | smp_store_release(&wq->head, next); |
1838 | } |
spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1840 | } |
1841 | return 0; |
1842 | } |
1843 | |
1844 | /** |
1845 | * rvt_qp_valid_operation - validate post send wr request |
1846 | * @qp: the qp |
1847 | * @post_parms: the post send table for the driver |
1848 | * @wr: the work request |
1849 | * |
1850 | * The routine validates the operation based on the |
* validation table and returns the length of the operation,
* which can extend beyond the ib_send_wr. Operation-dependent
* flags key atomic operation validation.
1854 | * |
1855 | * There is an exception for UD qps that validates the pd and |
1856 | * overrides the length to include the additional UD specific |
1857 | * length. |
1858 | * |
1859 | * Returns a negative error or the length of the work request |
1860 | * for building the swqe. |
1861 | */ |
1862 | static inline int rvt_qp_valid_operation( |
1863 | struct rvt_qp *qp, |
1864 | const struct rvt_operation_params *post_parms, |
1865 | const struct ib_send_wr *wr) |
1866 | { |
1867 | int len; |
1868 | |
1869 | if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length) |
1870 | return -EINVAL; |
1871 | if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type))) |
1872 | return -EINVAL; |
1873 | if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) && |
ibpd_to_rvtpd(qp->ibqp.pd)->user)
1875 | return -EINVAL; |
1876 | if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE && |
1877 | (wr->num_sge == 0 || |
1878 | wr->sg_list[0].length < sizeof(u64) || |
1879 | wr->sg_list[0].addr & (sizeof(u64) - 1))) |
1880 | return -EINVAL; |
1881 | if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC && |
1882 | !qp->s_max_rd_atomic) |
1883 | return -EINVAL; |
1884 | len = post_parms[wr->opcode].length; |
1885 | /* UD specific */ |
1886 | if (qp->ibqp.qp_type != IB_QPT_UC && |
1887 | qp->ibqp.qp_type != IB_QPT_RC) { |
1888 | if (qp->ibqp.pd != ud_wr(wr)->ah->pd) |
1889 | return -EINVAL; |
1890 | len = sizeof(struct ib_ud_wr); |
1891 | } |
1892 | return len; |
1893 | } |
1894 | |
1895 | /** |
1896 | * rvt_qp_is_avail - determine queue capacity |
1897 | * @qp: the qp |
1898 | * @rdi: the rdmavt device |
1899 | * @reserved_op: is reserved operation |
1900 | * |
1901 | * This assumes the s_hlock is held but the s_last |
1902 | * qp variable is uncontrolled. |
1903 | * |
1904 | * For non reserved operations, the qp->s_avail |
1905 | * may be changed. |
1906 | * |
1907 | * The return value is zero or a -ENOMEM. |
1908 | */ |
1909 | static inline int rvt_qp_is_avail( |
1910 | struct rvt_qp *qp, |
1911 | struct rvt_dev_info *rdi, |
1912 | bool reserved_op) |
1913 | { |
1914 | u32 slast; |
1915 | u32 avail; |
1916 | u32 reserved_used; |
1917 | |
1918 | /* see rvt_qp_wqe_unreserve() */ |
1919 | smp_mb__before_atomic(); |
1920 | if (unlikely(reserved_op)) { |
1921 | /* see rvt_qp_wqe_unreserve() */ |
reserved_used = atomic_read(&qp->s_reserved_used);
1923 | if (reserved_used >= rdi->dparms.reserved_operations) |
1924 | return -ENOMEM; |
1925 | return 0; |
1926 | } |
1927 | /* non-reserved operations */ |
1928 | if (likely(qp->s_avail)) |
1929 | return 0; |
1930 | /* See rvt_qp_complete_swqe() */ |
1931 | slast = smp_load_acquire(&qp->s_last); |
1932 | if (qp->s_head >= slast) |
1933 | avail = qp->s_size - (qp->s_head - slast); |
1934 | else |
1935 | avail = slast - qp->s_head; |
1936 | |
reserved_used = atomic_read(&qp->s_reserved_used);
avail = avail - 1 -
(rdi->dparms.reserved_operations - reserved_used);
/* ensure we don't assign a negative s_avail */
1941 | if ((s32)avail <= 0) |
1942 | return -ENOMEM; |
1943 | qp->s_avail = avail; |
1944 | if (WARN_ON(qp->s_avail > |
1945 | (qp->s_size - 1 - rdi->dparms.reserved_operations))) |
1946 | rvt_pr_err(rdi, |
1947 | "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u" , |
1948 | qp->ibqp.qp_num, qp->s_size, qp->s_avail, |
1949 | qp->s_head, qp->s_tail, qp->s_cur, |
1950 | qp->s_acked, qp->s_last); |
1951 | return 0; |
1952 | } |
1953 | |
1954 | /** |
1955 | * rvt_post_one_wr - post one RC, UC, or UD send work request |
1956 | * @qp: the QP to post on |
1957 | * @wr: the work request to send |
1958 | * @call_send: kick the send engine into gear |
1959 | */ |
1960 | static int rvt_post_one_wr(struct rvt_qp *qp, |
1961 | const struct ib_send_wr *wr, |
1962 | bool *call_send) |
1963 | { |
1964 | struct rvt_swqe *wqe; |
1965 | u32 next; |
1966 | int i; |
1967 | int j; |
1968 | int acc; |
1969 | struct rvt_lkey_table *rkt; |
1970 | struct rvt_pd *pd; |
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1972 | u8 log_pmtu; |
1973 | int ret; |
1974 | size_t cplen; |
1975 | bool reserved_op; |
1976 | int local_ops_delayed = 0; |
1977 | |
1978 | BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE)); |
1979 | |
1980 | /* IB spec says that num_sge == 0 is OK. */ |
1981 | if (unlikely(wr->num_sge > qp->s_max_sge)) |
1982 | return -EINVAL; |
1983 | |
ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
1985 | if (ret < 0) |
1986 | return ret; |
1987 | cplen = ret; |
1988 | |
1989 | /* |
1990 | * Local operations include fast register and local invalidate. |
1991 | * Fast register needs to be processed immediately because the |
1992 | * registered lkey may be used by following work requests and the |
1993 | * lkey needs to be valid at the time those requests are posted. |
1994 | * Local invalidate can be processed immediately if fencing is |
1995 | * not required and no previous local invalidate ops are pending. |
1996 | * Signaled local operations that have been processed immediately |
1997 | * need to have requests with "completion only" flags set posted |
1998 | * to the send queue in order to generate completions. |
1999 | */ |
2000 | if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) { |
2001 | switch (wr->opcode) { |
2002 | case IB_WR_REG_MR: |
2003 | ret = rvt_fast_reg_mr(qp, |
reg_wr(wr)->mr,
reg_wr(wr)->key,
reg_wr(wr)->access);
2007 | if (ret || !(wr->send_flags & IB_SEND_SIGNALED)) |
2008 | return ret; |
2009 | break; |
2010 | case IB_WR_LOCAL_INV: |
2011 | if ((wr->send_flags & IB_SEND_FENCE) || |
atomic_read(&qp->local_ops_pending)) {
local_ops_delayed = 1;
} else {
ret = rvt_invalidate_rkey(
qp, wr->ex.invalidate_rkey);
2017 | if (ret || !(wr->send_flags & IB_SEND_SIGNALED)) |
2018 | return ret; |
2019 | } |
2020 | break; |
2021 | default: |
2022 | return -EINVAL; |
2023 | } |
2024 | } |
2025 | |
2026 | reserved_op = rdi->post_parms[wr->opcode].flags & |
2027 | RVT_OPERATION_USE_RESERVE; |
2028 | /* check for avail */ |
2029 | ret = rvt_qp_is_avail(qp, rdi, reserved_op); |
2030 | if (ret) |
2031 | return ret; |
2032 | next = qp->s_head + 1; |
2033 | if (next >= qp->s_size) |
2034 | next = 0; |
2035 | |
2036 | rkt = &rdi->lkey_table; |
pd = ibpd_to_rvtpd(qp->ibqp.pd);
wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2039 | |
2040 | /* cplen has length from above */ |
2041 | memcpy(&wqe->ud_wr, wr, cplen); |
2042 | |
2043 | wqe->length = 0; |
2044 | j = 0; |
2045 | if (wr->num_sge) { |
2046 | struct rvt_sge *last_sge = NULL; |
2047 | |
2048 | acc = wr->opcode >= IB_WR_RDMA_READ ? |
2049 | IB_ACCESS_LOCAL_WRITE : 0; |
2050 | for (i = 0; i < wr->num_sge; i++) { |
2051 | u32 length = wr->sg_list[i].length; |
2052 | |
2053 | if (length == 0) |
2054 | continue; |
ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
&wr->sg_list[i], acc);
2057 | if (unlikely(ret < 0)) |
2058 | goto bail_inval_free; |
2059 | wqe->length += length; |
2060 | if (ret) |
2061 | last_sge = &wqe->sg_list[j]; |
2062 | j += ret; |
2063 | } |
2064 | wqe->wr.num_sge = j; |
2065 | } |
2066 | |
2067 | /* |
2068 | * Calculate and set SWQE PSN values prior to handing it off |
* to the driver's check routine. This gives the driver the
2070 | * opportunity to adjust PSN values based on internal checks. |
2071 | */ |
2072 | log_pmtu = qp->log_pmtu; |
2073 | if (qp->allowed_ops == IB_OPCODE_UD) { |
struct rvt_ah *ah = rvt_get_swqe_ah(wqe);

log_pmtu = ah->log_pmtu;
rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
2078 | } |
2079 | |
2080 | if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) { |
2081 | if (local_ops_delayed) |
atomic_inc(&qp->local_ops_pending);
2083 | else |
2084 | wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY; |
2085 | wqe->ssn = 0; |
2086 | wqe->psn = 0; |
2087 | wqe->lpsn = 0; |
2088 | } else { |
2089 | wqe->ssn = qp->s_ssn++; |
2090 | wqe->psn = qp->s_next_psn; |
2091 | wqe->lpsn = wqe->psn + |
2092 | (wqe->length ? |
2093 | ((wqe->length - 1) >> log_pmtu) : |
2094 | 0); |
2095 | } |
2096 | |
2097 | /* general part of wqe valid - allow for driver checks */ |
2098 | if (rdi->driver_f.setup_wqe) { |
2099 | ret = rdi->driver_f.setup_wqe(qp, wqe, call_send); |
2100 | if (ret < 0) |
2101 | goto bail_inval_free_ref; |
2102 | } |
2103 | |
2104 | if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) |
2105 | qp->s_next_psn = wqe->lpsn + 1; |
2106 | |
2107 | if (unlikely(reserved_op)) { |
2108 | wqe->wr.send_flags |= RVT_SEND_RESERVE_USED; |
2109 | rvt_qp_wqe_reserve(qp, wqe); |
2110 | } else { |
2111 | wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED; |
2112 | qp->s_avail--; |
2113 | } |
trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2115 | smp_wmb(); /* see request builders */ |
2116 | qp->s_head = next; |
2117 | |
2118 | return 0; |
2119 | |
2120 | bail_inval_free_ref: |
2121 | if (qp->allowed_ops == IB_OPCODE_UD) |
rdma_destroy_ah_attr(wqe->ud_wr.attr);
2123 | bail_inval_free: |
2124 | /* release mr holds */ |
2125 | while (j) { |
2126 | struct rvt_sge *sge = &wqe->sg_list[--j]; |
2127 | |
rvt_put_mr(sge->mr);
2129 | } |
2130 | return ret; |
2131 | } |
2132 | |
2133 | /** |
2134 | * rvt_post_send - post a send on a QP |
2135 | * @ibqp: the QP to post the send on |
2136 | * @wr: the list of work requests to post |
2137 | * @bad_wr: the first bad WR is put here |
2138 | * |
2139 | * This may be called from interrupt context. |
2140 | * |
2141 | * Return: 0 on success else errno |
2142 | */ |
2143 | int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, |
2144 | const struct ib_send_wr **bad_wr) |
2145 | { |
2146 | struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); |
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2148 | unsigned long flags = 0; |
2149 | bool call_send; |
2150 | unsigned nreq = 0; |
2151 | int err = 0; |
2152 | |
2153 | spin_lock_irqsave(&qp->s_hlock, flags); |
2154 | |
2155 | /* |
2156 | * Ensure QP state is such that we can send. If not bail out early, |
2157 | * there is no need to do this every time we post a send. |
2158 | */ |
2159 | if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) { |
spin_unlock_irqrestore(&qp->s_hlock, flags);
2161 | return -EINVAL; |
2162 | } |
2163 | |
2164 | /* |
2165 | * If the send queue is empty, and we only have a single WR then just go |
2166 | * ahead and kick the send engine into gear. Otherwise we will always |
2167 | * just schedule the send to happen later. |
2168 | */ |
2169 | call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next; |
2170 | |
2171 | for (; wr; wr = wr->next) { |
err = rvt_post_one_wr(qp, wr, &call_send);
2173 | if (unlikely(err)) { |
2174 | *bad_wr = wr; |
2175 | goto bail; |
2176 | } |
2177 | nreq++; |
2178 | } |
2179 | bail: |
spin_unlock_irqrestore(&qp->s_hlock, flags);
2181 | if (nreq) { |
2182 | /* |
2183 | * Only call do_send if there is exactly one packet, and the |
2184 | * driver said it was ok. |
2185 | */ |
2186 | if (nreq == 1 && call_send) |
2187 | rdi->driver_f.do_send(qp); |
2188 | else |
2189 | rdi->driver_f.schedule_send_no_lock(qp); |
2190 | } |
2191 | return err; |
2192 | } |
2193 | |
2194 | /** |
2195 | * rvt_post_srq_recv - post a receive on a shared receive queue |
2196 | * @ibsrq: the SRQ to post the receive on |
2197 | * @wr: the list of work requests to post |
2198 | * @bad_wr: A pointer to the first WR to cause a problem is put here |
2199 | * |
2200 | * This may be called from interrupt context. |
2201 | * |
2202 | * Return: 0 on success else errno |
2203 | */ |
2204 | int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, |
2205 | const struct ib_recv_wr **bad_wr) |
2206 | { |
2207 | struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); |
2208 | struct rvt_krwq *wq; |
2209 | unsigned long flags; |
2210 | |
2211 | for (; wr; wr = wr->next) { |
2212 | struct rvt_rwqe *wqe; |
2213 | u32 next; |
2214 | int i; |
2215 | |
2216 | if ((unsigned)wr->num_sge > srq->rq.max_sge) { |
2217 | *bad_wr = wr; |
2218 | return -EINVAL; |
2219 | } |
2220 | |
2221 | spin_lock_irqsave(&srq->rq.kwq->p_lock, flags); |
2222 | wq = srq->rq.kwq; |
2223 | next = wq->head + 1; |
2224 | if (next >= srq->rq.size) |
2225 | next = 0; |
2226 | if (next == READ_ONCE(wq->tail)) { |
spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2228 | *bad_wr = wr; |
2229 | return -ENOMEM; |
2230 | } |
2231 | |
wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2233 | wqe->wr_id = wr->wr_id; |
2234 | wqe->num_sge = wr->num_sge; |
2235 | for (i = 0; i < wr->num_sge; i++) { |
2236 | wqe->sg_list[i].addr = wr->sg_list[i].addr; |
2237 | wqe->sg_list[i].length = wr->sg_list[i].length; |
2238 | wqe->sg_list[i].lkey = wr->sg_list[i].lkey; |
2239 | } |
2240 | /* Make sure queue entry is written before the head index. */ |
2241 | smp_store_release(&wq->head, next); |
spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2243 | } |
2244 | return 0; |
2245 | } |
2246 | |
2247 | /* |
2248 | * rvt used the internal kernel struct as part of its ABI, for now make sure |
2249 | * the kernel struct does not change layout. FIXME: rvt should never cast the |
2250 | * user struct to a kernel struct. |
2251 | */ |
2252 | static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge) |
2253 | { |
2254 | BUILD_BUG_ON(offsetof(struct ib_sge, addr) != |
2255 | offsetof(struct rvt_wqe_sge, addr)); |
2256 | BUILD_BUG_ON(offsetof(struct ib_sge, length) != |
2257 | offsetof(struct rvt_wqe_sge, length)); |
2258 | BUILD_BUG_ON(offsetof(struct ib_sge, lkey) != |
2259 | offsetof(struct rvt_wqe_sge, lkey)); |
2260 | return (struct ib_sge *)sge; |
2261 | } |
2262 | |
2263 | /* |
2264 | * Validate a RWQE and fill in the SGE state. |
2265 | * Return 1 if OK. |
2266 | */ |
2267 | static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe) |
2268 | { |
2269 | int i, j, ret; |
2270 | struct ib_wc wc; |
2271 | struct rvt_lkey_table *rkt; |
2272 | struct rvt_pd *pd; |
2273 | struct rvt_sge_state *ss; |
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

rkt = &rdi->lkey_table;
pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2278 | ss = &qp->r_sge; |
2279 | ss->sg_list = qp->r_sg_list; |
2280 | qp->r_len = 0; |
2281 | for (i = j = 0; i < wqe->num_sge; i++) { |
2282 | if (wqe->sg_list[i].length == 0) |
2283 | continue; |
2284 | /* Check LKEY */ |
ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
NULL, rvt_cast_sge(&wqe->sg_list[i]),
IB_ACCESS_LOCAL_WRITE);
2288 | if (unlikely(ret <= 0)) |
2289 | goto bad_lkey; |
2290 | qp->r_len += wqe->sg_list[i].length; |
2291 | j++; |
2292 | } |
2293 | ss->num_sge = j; |
2294 | ss->total_len = qp->r_len; |
2295 | return 1; |
2296 | |
2297 | bad_lkey: |
2298 | while (j) { |
2299 | struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; |
2300 | |
rvt_put_mr(sge->mr);
2302 | } |
2303 | ss->num_sge = 0; |
2304 | memset(&wc, 0, sizeof(wc)); |
2305 | wc.wr_id = wqe->wr_id; |
2306 | wc.status = IB_WC_LOC_PROT_ERR; |
2307 | wc.opcode = IB_WC_RECV; |
2308 | wc.qp = &qp->ibqp; |
2309 | /* Signal solicited completion event. */ |
rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2311 | return 0; |
2312 | } |
2313 | |
2314 | /** |
2315 | * get_rvt_head - get head indices of the circular buffer |
2316 | * @rq: data structure for request queue entry |
2317 | * @ip: the QP |
2318 | * |
2319 | * Return - head index value |
2320 | */ |
2321 | static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip) |
2322 | { |
2323 | u32 head; |
2324 | |
2325 | if (ip) |
2326 | head = RDMA_READ_UAPI_ATOMIC(rq->wq->head); |
2327 | else |
2328 | head = rq->kwq->head; |
2329 | |
2330 | return head; |
2331 | } |
2332 | |
2333 | /** |
2334 | * rvt_get_rwqe - copy the next RWQE into the QP's RWQE |
2335 | * @qp: the QP |
2336 | * @wr_id_only: update qp->r_wr_id only, not qp->r_sge |
2337 | * |
2338 | * Return -1 if there is a local error, 0 if no RWQE is available, |
2339 | * otherwise return 1. |
2340 | * |
2341 | * Can be called from interrupt level. |
2342 | */ |
2343 | int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) |
2344 | { |
2345 | unsigned long flags; |
2346 | struct rvt_rq *rq; |
2347 | struct rvt_krwq *kwq = NULL; |
2348 | struct rvt_rwq *wq; |
2349 | struct rvt_srq *srq; |
2350 | struct rvt_rwqe *wqe; |
2351 | void (*handler)(struct ib_event *, void *); |
2352 | u32 tail; |
2353 | u32 head; |
2354 | int ret; |
2355 | void *ip = NULL; |
2356 | |
2357 | if (qp->ibqp.srq) { |
srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2359 | handler = srq->ibsrq.event_handler; |
2360 | rq = &srq->rq; |
2361 | ip = srq->ip; |
2362 | } else { |
2363 | srq = NULL; |
2364 | handler = NULL; |
2365 | rq = &qp->r_rq; |
2366 | ip = qp->ip; |
2367 | } |
2368 | |
2369 | spin_lock_irqsave(&rq->kwq->c_lock, flags); |
2370 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { |
2371 | ret = 0; |
2372 | goto unlock; |
2373 | } |
2374 | kwq = rq->kwq; |
2375 | if (ip) { |
2376 | wq = rq->wq; |
2377 | tail = RDMA_READ_UAPI_ATOMIC(wq->tail); |
2378 | } else { |
2379 | tail = kwq->tail; |
2380 | } |
2381 | |
2382 | /* Validate tail before using it since it is user writable. */ |
2383 | if (tail >= rq->size) |
2384 | tail = 0; |
2385 | |
2386 | if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) { |
2387 | head = get_rvt_head(rq, ip); |
2388 | kwq->count = rvt_get_rq_count(rq, head, tail); |
2389 | } |
2390 | if (unlikely(kwq->count == 0)) { |
2391 | ret = 0; |
2392 | goto unlock; |
2393 | } |
2394 | /* Make sure entry is read after the count is read. */ |
2395 | smp_rmb(); |
wqe = rvt_get_rwqe_ptr(rq, tail);
2397 | /* |
2398 | * Even though we update the tail index in memory, the verbs |
2399 | * consumer is not supposed to post more entries until a |
2400 | * completion is generated. |
2401 | */ |
2402 | if (++tail >= rq->size) |
2403 | tail = 0; |
2404 | if (ip) |
2405 | RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail); |
2406 | else |
2407 | kwq->tail = tail; |
2408 | if (!wr_id_only && !init_sge(qp, wqe)) { |
2409 | ret = -1; |
2410 | goto unlock; |
2411 | } |
2412 | qp->r_wr_id = wqe->wr_id; |
2413 | |
2414 | kwq->count--; |
2415 | ret = 1; |
set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2417 | if (handler) { |
2418 | /* |
2419 | * Validate head pointer value and compute |
2420 | * the number of remaining WQEs. |
2421 | */ |
2422 | if (kwq->count < srq->limit) { |
2423 | kwq->count = |
2424 | rvt_get_rq_count(rq, |
get_rvt_head(rq, ip), tail);
2426 | if (kwq->count < srq->limit) { |
2427 | struct ib_event ev; |
2428 | |
2429 | srq->limit = 0; |
spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2431 | ev.device = qp->ibqp.device; |
2432 | ev.element.srq = qp->ibqp.srq; |
2433 | ev.event = IB_EVENT_SRQ_LIMIT_REACHED; |
2434 | handler(&ev, srq->ibsrq.srq_context); |
2435 | goto bail; |
2436 | } |
2437 | } |
2438 | } |
2439 | unlock: |
spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2441 | bail: |
2442 | return ret; |
2443 | } |
2444 | EXPORT_SYMBOL(rvt_get_rwqe); |
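
/*
 * Usage sketch (illustration only, hypothetical caller): consumers such as
 * the RC/UC receive paths map the tri-state return of rvt_get_rwqe() onto
 * protocol actions roughly as follows.
 */
#if 0	/* example only */
static int example_consume_rwqe(struct rvt_qp *qp)
{
	int ret = rvt_get_rwqe(qp, false);

	if (ret < 0)
		return ret;	/* local error, completion already queued */
	if (!ret)
		return -EAGAIN;	/* no RWQE posted: e.g. respond with RNR NAK */
	/* ret == 1: qp->r_wr_id and qp->r_sge now describe the buffer */
	return 0;
}
#endif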
2445 | |
2446 | /** |
2447 | * rvt_comm_est - handle trap with QP established |
2448 | * @qp: the QP |
2449 | */ |
2450 | void rvt_comm_est(struct rvt_qp *qp) |
2451 | { |
2452 | qp->r_flags |= RVT_R_COMM_EST; |
2453 | if (qp->ibqp.event_handler) { |
2454 | struct ib_event ev; |
2455 | |
2456 | ev.device = qp->ibqp.device; |
2457 | ev.element.qp = &qp->ibqp; |
2458 | ev.event = IB_EVENT_COMM_EST; |
2459 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); |
2460 | } |
2461 | } |
2462 | EXPORT_SYMBOL(rvt_comm_est); |
2463 | |
2464 | void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err) |
2465 | { |
2466 | unsigned long flags; |
2467 | int lastwqe; |
2468 | |
2469 | spin_lock_irqsave(&qp->s_lock, flags); |
2470 | lastwqe = rvt_error_qp(qp, err); |
spin_unlock_irqrestore(&qp->s_lock, flags);
2472 | |
2473 | if (lastwqe) { |
2474 | struct ib_event ev; |
2475 | |
2476 | ev.device = qp->ibqp.device; |
2477 | ev.element.qp = &qp->ibqp; |
2478 | ev.event = IB_EVENT_QP_LAST_WQE_REACHED; |
2479 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); |
2480 | } |
2481 | } |
2482 | EXPORT_SYMBOL(rvt_rc_error); |
2483 | |
2484 | /* |
* rvt_rnr_tbl_to_usec - convert an index into ib_rvt_rnr_table to usec
2486 | * @index - the index |
2487 | * return usec from an index into ib_rvt_rnr_table |
2488 | */ |
2489 | unsigned long rvt_rnr_tbl_to_usec(u32 index) |
2490 | { |
2491 | return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)]; |
2492 | } |
2493 | EXPORT_SYMBOL(rvt_rnr_tbl_to_usec); |
2494 | |
2495 | static inline unsigned long rvt_aeth_to_usec(u32 aeth) |
2496 | { |
2497 | return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) & |
2498 | IB_AETH_CREDIT_MASK]; |
2499 | } |
2500 | |
2501 | /* |
2502 | * rvt_add_retry_timer_ext - add/start a retry timer |
2503 | * @qp - the QP |
2504 | * @shift - timeout shift to wait for multiple packets |
2505 | * add a retry timer on the QP |
2506 | */ |
2507 | void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift) |
2508 | { |
2509 | struct ib_qp *ibqp = &qp->ibqp; |
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2511 | |
2512 | lockdep_assert_held(&qp->s_lock); |
2513 | qp->s_flags |= RVT_S_TIMER; |
2514 | /* 4.096 usec. * (1 << qp->timeout) */ |
2515 | qp->s_timer.expires = jiffies + rdi->busy_jiffies + |
2516 | (qp->timeout_jiffies << shift); |
add_timer(&qp->s_timer);
2518 | } |
2519 | EXPORT_SYMBOL(rvt_add_retry_timer_ext); |
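
/*
 * Worked example (illustration only): per the comment above, the IBTA
 * timeout code is exponential, roughly 4.096 usec * 2^timeout, so a code
 * of 14 is about 4.096 * 16384 usec, i.e. ~67 msec.  Assuming
 * rvt_timeout_to_jiffies() performs the equivalent conversion, the retry
 * timer then fires after that interval scaled by 2^shift.
 */
#if 0	/* example only: IB timeout code 14 */
	unsigned long usec = (4096UL << 14) / 1000;	/* = 67108 usec, ~67 msec */
	unsigned long tmo = usecs_to_jiffies(usec);
#endif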
2520 | |
2521 | /** |
2522 | * rvt_add_rnr_timer - add/start an rnr timer on the QP |
2523 | * @qp: the QP |
2524 | * @aeth: aeth of RNR timeout, simulated aeth for loopback |
2525 | */ |
2526 | void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth) |
2527 | { |
2528 | u32 to; |
2529 | |
2530 | lockdep_assert_held(&qp->s_lock); |
2531 | qp->s_flags |= RVT_S_WAIT_RNR; |
2532 | to = rvt_aeth_to_usec(aeth); |
2533 | trace_rvt_rnrnak_add(qp, to); |
hrtimer_start(&qp->s_rnr_timer,
ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2536 | } |
2537 | EXPORT_SYMBOL(rvt_add_rnr_timer); |
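
/*
 * Usage note (illustration only): a caller without a real AETH, such as
 * the loopback path later in this file, simulates one by shifting the
 * 5-bit RNR timeout code into the credit field so rvt_aeth_to_usec()
 * recovers the same code.
 */
#if 0	/* example only */
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer << IB_AETH_CREDIT_SHIFT);
#endif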
2538 | |
2539 | /** |
2540 | * rvt_stop_rc_timers - stop all timers |
2541 | * @qp: the QP |
2542 | * stop any pending timers |
2543 | */ |
2544 | void rvt_stop_rc_timers(struct rvt_qp *qp) |
2545 | { |
2546 | lockdep_assert_held(&qp->s_lock); |
2547 | /* Remove QP from all timers */ |
2548 | if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { |
2549 | qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); |
del_timer(&qp->s_timer);
hrtimer_try_to_cancel(&qp->s_rnr_timer);
2552 | } |
2553 | } |
2554 | EXPORT_SYMBOL(rvt_stop_rc_timers); |
2555 | |
2556 | /** |
2557 | * rvt_stop_rnr_timer - stop an rnr timer |
2558 | * @qp: the QP |
2559 | * |
2560 | * stop an rnr timer and return if the timer |
2561 | * had been pending. |
2562 | */ |
2563 | static void rvt_stop_rnr_timer(struct rvt_qp *qp) |
2564 | { |
2565 | lockdep_assert_held(&qp->s_lock); |
2566 | /* Remove QP from rnr timer */ |
2567 | if (qp->s_flags & RVT_S_WAIT_RNR) { |
2568 | qp->s_flags &= ~RVT_S_WAIT_RNR; |
trace_rvt_rnrnak_stop(qp, 0);
2570 | } |
2571 | } |
2572 | |
2573 | /** |
2574 | * rvt_del_timers_sync - wait for any timeout routines to exit |
2575 | * @qp: the QP |
2576 | */ |
2577 | void rvt_del_timers_sync(struct rvt_qp *qp) |
2578 | { |
del_timer_sync(&qp->s_timer);
hrtimer_cancel(&qp->s_rnr_timer);
2581 | } |
2582 | EXPORT_SYMBOL(rvt_del_timers_sync); |
2583 | |
2584 | /* |
2585 | * This is called from s_timer for missing responses. |
2586 | */ |
2587 | static void rvt_rc_timeout(struct timer_list *t) |
2588 | { |
2589 | struct rvt_qp *qp = from_timer(qp, t, s_timer); |
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2591 | unsigned long flags; |
2592 | |
2593 | spin_lock_irqsave(&qp->r_lock, flags); |
spin_lock(&qp->s_lock);
2595 | if (qp->s_flags & RVT_S_TIMER) { |
2596 | struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1]; |
2597 | |
2598 | qp->s_flags &= ~RVT_S_TIMER; |
2599 | rvp->n_rc_timeouts++; |
del_timer(&qp->s_timer);
trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2602 | if (rdi->driver_f.notify_restart_rc) |
2603 | rdi->driver_f.notify_restart_rc(qp, |
2604 | qp->s_last_psn + 1, |
2605 | 1); |
2606 | rdi->driver_f.schedule_send(qp); |
2607 | } |
spin_unlock(&qp->s_lock);
spin_unlock_irqrestore(&qp->r_lock, flags);
2610 | } |
2611 | |
2612 | /* |
2613 | * This is called from s_timer for RNR timeouts. |
2614 | */ |
2615 | enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t) |
2616 | { |
2617 | struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer); |
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2619 | unsigned long flags; |
2620 | |
2621 | spin_lock_irqsave(&qp->s_lock, flags); |
2622 | rvt_stop_rnr_timer(qp); |
trace_rvt_rnrnak_timeout(qp, 0);
2624 | rdi->driver_f.schedule_send(qp); |
spin_unlock_irqrestore(&qp->s_lock, flags);
2626 | return HRTIMER_NORESTART; |
2627 | } |
2628 | EXPORT_SYMBOL(rvt_rc_rnr_retry); |
2629 | |
2630 | /** |
2631 | * rvt_qp_iter_init - initial for QP iteration |
2632 | * @rdi: rvt devinfo |
2633 | * @v: u64 value |
2634 | * @cb: user-defined callback |
2635 | * |
2636 | * This returns an iterator suitable for iterating QPs |
2637 | * in the system. |
2638 | * |
2639 | * The @cb is a user-defined callback and @v is a 64-bit |
2640 | * value passed to and relevant for processing in the |
2641 | * @cb. An example use case would be to alter QP processing |
2642 | * based on criteria not part of the rvt_qp. |
2643 | * |
2644 | * Use cases that require memory allocation to succeed |
2645 | * must preallocate appropriately. |
2646 | * |
2647 | * Return: a pointer to an rvt_qp_iter or NULL |
2648 | */ |
2649 | struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi, |
2650 | u64 v, |
2651 | void (*cb)(struct rvt_qp *qp, u64 v)) |
2652 | { |
2653 | struct rvt_qp_iter *i; |
2654 | |
i = kzalloc(sizeof(*i), GFP_KERNEL);
2656 | if (!i) |
2657 | return NULL; |
2658 | |
2659 | i->rdi = rdi; |
2660 | /* number of special QPs (SMI/GSI) for device */ |
2661 | i->specials = rdi->ibdev.phys_port_cnt * 2; |
2662 | i->v = v; |
2663 | i->cb = cb; |
2664 | |
2665 | return i; |
2666 | } |
2667 | EXPORT_SYMBOL(rvt_qp_iter_init); |
2668 | |
2669 | /** |
2670 | * rvt_qp_iter_next - return the next QP in iter |
2671 | * @iter: the iterator |
2672 | * |
2673 | * Fine grained QP iterator suitable for use |
2674 | * with debugfs seq_file mechanisms. |
2675 | * |
2676 | * Updates iter->qp with the current QP when the return |
2677 | * value is 0. |
2678 | * |
2679 | * Return: 0 - iter->qp is valid 1 - no more QPs |
2680 | */ |
2681 | int rvt_qp_iter_next(struct rvt_qp_iter *iter) |
2682 | __must_hold(RCU) |
2683 | { |
2684 | int n = iter->n; |
2685 | int ret = 1; |
2686 | struct rvt_qp *pqp = iter->qp; |
2687 | struct rvt_qp *qp; |
2688 | struct rvt_dev_info *rdi = iter->rdi; |
2689 | |
2690 | /* |
2691 | * The approach is to consider the special qps |
2692 | * as additional table entries before the |
2693 | * real hash table. Since the qp code sets |
2694 | * the qp->next hash link to NULL, this works just fine. |
2695 | * |
2696 | * iter->specials is 2 * # ports |
2697 | * |
2698 | * n = 0..iter->specials is the special qp indices |
2699 | * |
2700 | * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are |
2701 | * the potential hash bucket entries |
2702 | * |
2703 | */ |
2704 | for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) { |
2705 | if (pqp) { |
2706 | qp = rcu_dereference(pqp->next); |
2707 | } else { |
2708 | if (n < iter->specials) { |
2709 | struct rvt_ibport *rvp; |
2710 | int pidx; |
2711 | |
2712 | pidx = n % rdi->ibdev.phys_port_cnt; |
2713 | rvp = rdi->ports[pidx]; |
2714 | qp = rcu_dereference(rvp->qp[n & 1]); |
2715 | } else { |
2716 | qp = rcu_dereference( |
2717 | rdi->qp_dev->qp_table[ |
2718 | (n - iter->specials)]); |
2719 | } |
2720 | } |
2721 | pqp = qp; |
2722 | if (qp) { |
2723 | iter->qp = qp; |
2724 | iter->n = n; |
2725 | return 0; |
2726 | } |
2727 | } |
2728 | return ret; |
2729 | } |
2730 | EXPORT_SYMBOL(rvt_qp_iter_next); |
2731 | |
2732 | /** |
2733 | * rvt_qp_iter - iterate all QPs |
2734 | * @rdi: rvt devinfo |
2735 | * @v: a 64-bit value |
2736 | * @cb: a callback |
2737 | * |
2738 | * This provides a way for iterating all QPs. |
2739 | * |
2740 | * The @cb is a user-defined callback and @v is a 64-bit |
2741 | * value passed to and relevant for processing in the |
2742 | * cb. An example use case would be to alter QP processing |
2743 | * based on criteria not part of the rvt_qp. |
2744 | * |
2745 | * The code has an internal iterator to simplify |
2746 | * non seq_file use cases. |
2747 | */ |
2748 | void rvt_qp_iter(struct rvt_dev_info *rdi, |
2749 | u64 v, |
2750 | void (*cb)(struct rvt_qp *qp, u64 v)) |
2751 | { |
2752 | int ret; |
2753 | struct rvt_qp_iter i = { |
2754 | .rdi = rdi, |
2755 | .specials = rdi->ibdev.phys_port_cnt * 2, |
2756 | .v = v, |
2757 | .cb = cb |
2758 | }; |
2759 | |
2760 | rcu_read_lock(); |
2761 | do { |
2762 | ret = rvt_qp_iter_next(&i); |
2763 | if (!ret) { |
rvt_get_qp(i.qp);
2765 | rcu_read_unlock(); |
2766 | i.cb(i.qp, i.v); |
2767 | rcu_read_lock(); |
rvt_put_qp(i.qp);
2769 | } |
2770 | } while (!ret); |
2771 | rcu_read_unlock(); |
2772 | } |
2773 | EXPORT_SYMBOL(rvt_qp_iter); |
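
/*
 * Usage sketch (illustration only, hypothetical callback): a driver can
 * walk every QP on a device and key per-QP logic off the opaque 64-bit
 * value, for example to log QPs bound to a given port.
 */
#if 0	/* example only */
static void example_qp_cb(struct rvt_qp *qp, u64 v)
{
	if (qp->port_num == (u8)v)
		pr_info("QP %u uses port %llu\n", qp->ibqp.qp_num, v);
}

/* rvt_qp_iter(rdi, port_num, example_qp_cb); */
#endif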
2774 | |
2775 | /* |
2776 | * This should be called with s_lock and r_lock held. |
2777 | */ |
2778 | void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, |
2779 | enum ib_wc_status status) |
2780 | { |
2781 | u32 old_last, last; |
2782 | struct rvt_dev_info *rdi; |
2783 | |
2784 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) |
2785 | return; |
rdi = ib_to_rvt(qp->ibqp.device);
2787 | |
2788 | old_last = qp->s_last; |
trace_rvt_qp_send_completion(qp, wqe, old_last);
last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2791 | status); |
2792 | if (qp->s_acked == old_last) |
2793 | qp->s_acked = last; |
2794 | if (qp->s_cur == old_last) |
2795 | qp->s_cur = last; |
2796 | if (qp->s_tail == old_last) |
2797 | qp->s_tail = last; |
2798 | if (qp->state == IB_QPS_SQD && last == qp->s_cur) |
2799 | qp->s_draining = 0; |
2800 | } |
2801 | EXPORT_SYMBOL(rvt_send_complete); |
2802 | |
2803 | /** |
2804 | * rvt_copy_sge - copy data to SGE memory |
2805 | * @qp: associated QP |
2806 | * @ss: the SGE state |
2807 | * @data: the data to copy |
2808 | * @length: the length of the data |
2809 | * @release: boolean to release MR |
2810 | * @copy_last: do a separate copy of the last 8 bytes |
2811 | */ |
2812 | void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss, |
2813 | void *data, u32 length, |
2814 | bool release, bool copy_last) |
2815 | { |
2816 | struct rvt_sge *sge = &ss->sge; |
2817 | int i; |
2818 | bool in_last = false; |
2819 | bool cacheless_copy = false; |
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2821 | struct rvt_wss *wss = rdi->wss; |
2822 | unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode; |
2823 | |
2824 | if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) { |
2825 | cacheless_copy = length >= PAGE_SIZE; |
2826 | } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) { |
2827 | if (length >= PAGE_SIZE) { |
2828 | /* |
2829 | * NOTE: this *assumes*: |
2830 | * o The first vaddr is the dest. |
2831 | * o If multiple pages, then vaddr is sequential. |
2832 | */ |
wss_insert(wss, sge->vaddr);
if (length >= (2 * PAGE_SIZE))
wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2836 | |
2837 | cacheless_copy = wss_exceeds_threshold(wss); |
2838 | } else { |
2839 | wss_advance_clean_counter(wss); |
2840 | } |
2841 | } |
2842 | |
2843 | if (copy_last) { |
2844 | if (length > 8) { |
2845 | length -= 8; |
2846 | } else { |
2847 | copy_last = false; |
2848 | in_last = true; |
2849 | } |
2850 | } |
2851 | |
2852 | again: |
2853 | while (length) { |
2854 | u32 len = rvt_get_sge_length(sge, length); |
2855 | |
2856 | WARN_ON_ONCE(len == 0); |
2857 | if (unlikely(in_last)) { |
2858 | /* enforce byte transfer ordering */ |
2859 | for (i = 0; i < len; i++) |
2860 | ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i]; |
2861 | } else if (cacheless_copy) { |
cacheless_memcpy(sge->vaddr, data, len);
2863 | } else { |
2864 | memcpy(sge->vaddr, data, len); |
2865 | } |
rvt_update_sge(ss, len, release);
2867 | data += len; |
2868 | length -= len; |
2869 | } |
2870 | |
2871 | if (copy_last) { |
2872 | copy_last = false; |
2873 | in_last = true; |
2874 | length = 8; |
2875 | goto again; |
2876 | } |
2877 | } |
2878 | EXPORT_SYMBOL(rvt_copy_sge); |
2879 | |
2880 | static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp, |
2881 | struct rvt_qp *sqp) |
2882 | { |
2883 | rvp->n_pkt_drops++; |
2884 | /* |
2885 | * For RC, the requester would timeout and retry so |
2886 | * shortcut the timeouts and just signal too many retries. |
2887 | */ |
2888 | return sqp->ibqp.qp_type == IB_QPT_RC ? |
2889 | IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS; |
2890 | } |
2891 | |
2892 | /** |
2893 | * rvt_ruc_loopback - handle UC and RC loopback requests |
2894 | * @sqp: the sending QP |
2895 | * |
2896 | * This is called from rvt_do_send() to forward a WQE addressed to the same HFI |
2897 | * Note that although we are single threaded due to the send engine, we still |
2898 | * have to protect against post_send(). We don't have to worry about |
2899 | * receive interrupts since this is a connected protocol and all packets |
2900 | * will pass through here. |
2901 | */ |
2902 | void rvt_ruc_loopback(struct rvt_qp *sqp) |
2903 | { |
2904 | struct rvt_ibport *rvp = NULL; |
struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2906 | struct rvt_qp *qp; |
2907 | struct rvt_swqe *wqe; |
2908 | struct rvt_sge *sge; |
2909 | unsigned long flags; |
2910 | struct ib_wc wc; |
2911 | u64 sdata; |
2912 | atomic64_t *maddr; |
2913 | enum ib_wc_status send_status; |
2914 | bool release; |
2915 | int ret; |
2916 | bool copy_last = false; |
2917 | int local_ops = 0; |
2918 | |
2919 | rcu_read_lock(); |
2920 | rvp = rdi->ports[sqp->port_num - 1]; |
2921 | |
2922 | /* |
2923 | * Note that we check the responder QP state after |
2924 | * checking the requester's state. |
2925 | */ |
2926 | |
qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
sqp->remote_qpn);
2929 | |
2930 | spin_lock_irqsave(&sqp->s_lock, flags); |
2931 | |
2932 | /* Return if we are already busy processing a work request. */ |
2933 | if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) || |
2934 | !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND)) |
2935 | goto unlock; |
2936 | |
2937 | sqp->s_flags |= RVT_S_BUSY; |
2938 | |
2939 | again: |
2940 | if (sqp->s_last == READ_ONCE(sqp->s_head)) |
2941 | goto clr_busy; |
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
2943 | |
2944 | /* Return if it is not OK to start a new work request. */ |
2945 | if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) { |
2946 | if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND)) |
2947 | goto clr_busy; |
2948 | /* We are in the error state, flush the work request. */ |
2949 | send_status = IB_WC_WR_FLUSH_ERR; |
2950 | goto flush_send; |
2951 | } |
2952 | |
2953 | /* |
2954 | * We can rely on the entry not changing without the s_lock |
2955 | * being held until we update s_last. |
2956 | * We increment s_cur to indicate s_last is in progress. |
2957 | */ |
2958 | if (sqp->s_last == sqp->s_cur) { |
2959 | if (++sqp->s_cur >= sqp->s_size) |
2960 | sqp->s_cur = 0; |
2961 | } |
spin_unlock_irqrestore(&sqp->s_lock, flags);
2963 | |
2964 | if (!qp) { |
2965 | send_status = loopback_qp_drop(rvp, sqp); |
2966 | goto serr_no_r_lock; |
2967 | } |
2968 | spin_lock_irqsave(&qp->r_lock, flags); |
2969 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) || |
2970 | qp->ibqp.qp_type != sqp->ibqp.qp_type) { |
2971 | send_status = loopback_qp_drop(rvp, sqp); |
2972 | goto serr; |
2973 | } |
2974 | |
2975 | memset(&wc, 0, sizeof(wc)); |
2976 | send_status = IB_WC_SUCCESS; |
2977 | |
2978 | release = true; |
2979 | sqp->s_sge.sge = wqe->sg_list[0]; |
2980 | sqp->s_sge.sg_list = wqe->sg_list + 1; |
2981 | sqp->s_sge.num_sge = wqe->wr.num_sge; |
2982 | sqp->s_len = wqe->length; |
2983 | switch (wqe->wr.opcode) { |
2984 | case IB_WR_REG_MR: |
2985 | goto send_comp; |
2986 | |
2987 | case IB_WR_LOCAL_INV: |
2988 | if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { |
if (rvt_invalidate_rkey(sqp,
wqe->wr.ex.invalidate_rkey))
2991 | send_status = IB_WC_LOC_PROT_ERR; |
2992 | local_ops = 1; |
2993 | } |
2994 | goto send_comp; |
2995 | |
2996 | case IB_WR_SEND_WITH_INV: |
2997 | case IB_WR_SEND_WITH_IMM: |
2998 | case IB_WR_SEND: |
2999 | ret = rvt_get_rwqe(qp, false); |
3000 | if (ret < 0) |
3001 | goto op_err; |
3002 | if (!ret) |
3003 | goto rnr_nak; |
3004 | if (wqe->length > qp->r_len) |
3005 | goto inv_err; |
3006 | switch (wqe->wr.opcode) { |
3007 | case IB_WR_SEND_WITH_INV: |
3008 | if (!rvt_invalidate_rkey(qp, |
wqe->wr.ex.invalidate_rkey)) {
3010 | wc.wc_flags = IB_WC_WITH_INVALIDATE; |
3011 | wc.ex.invalidate_rkey = |
3012 | wqe->wr.ex.invalidate_rkey; |
3013 | } |
3014 | break; |
3015 | case IB_WR_SEND_WITH_IMM: |
3016 | wc.wc_flags = IB_WC_WITH_IMM; |
3017 | wc.ex.imm_data = wqe->wr.ex.imm_data; |
3018 | break; |
3019 | default: |
3020 | break; |
3021 | } |
3022 | break; |
3023 | |
3024 | case IB_WR_RDMA_WRITE_WITH_IMM: |
3025 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) |
3026 | goto inv_err; |
3027 | wc.wc_flags = IB_WC_WITH_IMM; |
3028 | wc.ex.imm_data = wqe->wr.ex.imm_data; |
3029 | ret = rvt_get_rwqe(qp, true); |
3030 | if (ret < 0) |
3031 | goto op_err; |
3032 | if (!ret) |
3033 | goto rnr_nak; |
3034 | /* skip copy_last set and qp_access_flags recheck */ |
3035 | goto do_write; |
3036 | case IB_WR_RDMA_WRITE: |
3037 | copy_last = rvt_is_user_qp(qp); |
3038 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) |
3039 | goto inv_err; |
3040 | do_write: |
3041 | if (wqe->length == 0) |
3042 | break; |
3043 | if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length, |
3044 | wqe->rdma_wr.remote_addr, |
3045 | wqe->rdma_wr.rkey, |
3046 | IB_ACCESS_REMOTE_WRITE))) |
3047 | goto acc_err; |
3048 | qp->r_sge.sg_list = NULL; |
3049 | qp->r_sge.num_sge = 1; |
3050 | qp->r_sge.total_len = wqe->length; |
3051 | break; |
3052 | |
3053 | case IB_WR_RDMA_READ: |
3054 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) |
3055 | goto inv_err; |
3056 | if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, |
3057 | wqe->rdma_wr.remote_addr, |
3058 | wqe->rdma_wr.rkey, |
3059 | IB_ACCESS_REMOTE_READ))) |
3060 | goto acc_err; |
3061 | release = false; |
3062 | sqp->s_sge.sg_list = NULL; |
3063 | sqp->s_sge.num_sge = 1; |
3064 | qp->r_sge.sge = wqe->sg_list[0]; |
3065 | qp->r_sge.sg_list = wqe->sg_list + 1; |
3066 | qp->r_sge.num_sge = wqe->wr.num_sge; |
3067 | qp->r_sge.total_len = wqe->length; |
3068 | break; |
3069 | |
3070 | case IB_WR_ATOMIC_CMP_AND_SWP: |
3071 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
3072 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) |
3073 | goto inv_err; |
3074 | if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1))) |
3075 | goto inv_err; |
3076 | if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), |
3077 | wqe->atomic_wr.remote_addr, |
3078 | wqe->atomic_wr.rkey, |
3079 | IB_ACCESS_REMOTE_ATOMIC))) |
3080 | goto acc_err; |
3081 | /* Perform atomic OP and save result. */ |
3082 | maddr = (atomic64_t *)qp->r_sge.sge.vaddr; |
3083 | sdata = wqe->atomic_wr.compare_add; |
3084 | *(u64 *)sqp->s_sge.sge.vaddr = |
3085 | (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? |
(u64)atomic64_add_return(sdata, maddr) - sdata :
3087 | (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr, |
3088 | sdata, wqe->atomic_wr.swap); |
rvt_put_mr(qp->r_sge.sge.mr);
3090 | qp->r_sge.num_sge = 0; |
3091 | goto send_comp; |
3092 | |
3093 | default: |
3094 | send_status = IB_WC_LOC_QP_OP_ERR; |
3095 | goto serr; |
3096 | } |
3097 | |
3098 | sge = &sqp->s_sge.sge; |
3099 | while (sqp->s_len) { |
u32 len = rvt_get_sge_length(sge, sqp->s_len);
3101 | |
3102 | WARN_ON_ONCE(len == 0); |
3103 | rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, |
3104 | len, release, copy_last); |
rvt_update_sge(&sqp->s_sge, len, !release);
3106 | sqp->s_len -= len; |
3107 | } |
3108 | if (release) |
rvt_put_ss(&qp->r_sge);
3110 | |
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3112 | goto send_comp; |
3113 | |
3114 | if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) |
3115 | wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; |
3116 | else |
3117 | wc.opcode = IB_WC_RECV; |
3118 | wc.wr_id = qp->r_wr_id; |
3119 | wc.status = IB_WC_SUCCESS; |
3120 | wc.byte_len = wqe->length; |
3121 | wc.qp = &qp->ibqp; |
3122 | wc.src_qp = qp->remote_qpn; |
wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3125 | wc.port_num = 1; |
3126 | /* Signal completion event if the solicited bit is set. */ |
rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
3128 | |
3129 | send_comp: |
spin_unlock_irqrestore(&qp->r_lock, flags);
3131 | spin_lock_irqsave(&sqp->s_lock, flags); |
3132 | rvp->n_loop_pkts++; |
3133 | flush_send: |
3134 | sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; |
spin_lock(&sqp->r_lock);
rvt_send_complete(sqp, wqe, send_status);
spin_unlock(&sqp->r_lock);
3138 | if (local_ops) { |
atomic_dec(&sqp->local_ops_pending);
3140 | local_ops = 0; |
3141 | } |
3142 | goto again; |
3143 | |
3144 | rnr_nak: |
3145 | /* Handle RNR NAK */ |
3146 | if (qp->ibqp.qp_type == IB_QPT_UC) |
3147 | goto send_comp; |
3148 | rvp->n_rnr_naks++; |
3149 | /* |
3150 | * Note: we don't need the s_lock held since the BUSY flag |
3151 | * makes this single threaded. |
3152 | */ |
3153 | if (sqp->s_rnr_retry == 0) { |
3154 | send_status = IB_WC_RNR_RETRY_EXC_ERR; |
3155 | goto serr; |
3156 | } |
3157 | if (sqp->s_rnr_retry_cnt < 7) |
3158 | sqp->s_rnr_retry--; |
spin_unlock_irqrestore(&qp->r_lock, flags);
3160 | spin_lock_irqsave(&sqp->s_lock, flags); |
3161 | if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK)) |
3162 | goto clr_busy; |
3163 | rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer << |
3164 | IB_AETH_CREDIT_SHIFT); |
3165 | goto clr_busy; |
3166 | |
3167 | op_err: |
3168 | send_status = IB_WC_REM_OP_ERR; |
3169 | wc.status = IB_WC_LOC_QP_OP_ERR; |
3170 | goto err; |
3171 | |
3172 | inv_err: |
3173 | send_status = |
3174 | sqp->ibqp.qp_type == IB_QPT_RC ? |
3175 | IB_WC_REM_INV_REQ_ERR : |
3176 | IB_WC_SUCCESS; |
3177 | wc.status = IB_WC_LOC_QP_OP_ERR; |
3178 | goto err; |
3179 | |
3180 | acc_err: |
3181 | send_status = IB_WC_REM_ACCESS_ERR; |
3182 | wc.status = IB_WC_LOC_PROT_ERR; |
3183 | err: |
3184 | /* responder goes to error state */ |
3185 | rvt_rc_error(qp, wc.status); |
3186 | |
3187 | serr: |
spin_unlock_irqrestore(&qp->r_lock, flags);
3189 | serr_no_r_lock: |
3190 | spin_lock_irqsave(&sqp->s_lock, flags); |
spin_lock(&sqp->r_lock);
rvt_send_complete(sqp, wqe, send_status);
spin_unlock(&sqp->r_lock);
3194 | if (sqp->ibqp.qp_type == IB_QPT_RC) { |
3195 | int lastwqe; |
3196 | |
spin_lock(&sqp->r_lock);
lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
spin_unlock(&sqp->r_lock);
3200 | |
3201 | sqp->s_flags &= ~RVT_S_BUSY; |
spin_unlock_irqrestore(&sqp->s_lock, flags);
3203 | if (lastwqe) { |
3204 | struct ib_event ev; |
3205 | |
3206 | ev.device = sqp->ibqp.device; |
3207 | ev.element.qp = &sqp->ibqp; |
3208 | ev.event = IB_EVENT_QP_LAST_WQE_REACHED; |
3209 | sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context); |
3210 | } |
3211 | goto done; |
3212 | } |
3213 | clr_busy: |
3214 | sqp->s_flags &= ~RVT_S_BUSY; |
3215 | unlock: |
spin_unlock_irqrestore(&sqp->s_lock, flags);
3217 | done: |
3218 | rcu_read_unlock(); |
3219 | } |
3220 | EXPORT_SYMBOL(rvt_ruc_loopback); |
3221 | |