1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. |
4 | * All rights reserved. |
5 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. |
6 | * |
7 | * This software is available to you under a choice of one of two |
8 | * licenses. You may choose to be licensed under the terms of the GNU |
9 | * General Public License (GPL) Version 2, available from the file |
10 | * COPYING in the main directory of this source tree, or the |
11 | * OpenIB.org BSD license below: |
12 | * |
13 | * Redistribution and use in source and binary forms, with or |
14 | * without modification, are permitted provided that the following |
15 | * conditions are met: |
16 | * |
17 | * - Redistributions of source code must retain the above |
18 | * copyright notice, this list of conditions and the following |
19 | * disclaimer. |
20 | * |
21 | * - Redistributions in binary form must reproduce the above |
22 | * copyright notice, this list of conditions and the following |
23 | * disclaimer in the documentation and/or other materials |
24 | * provided with the distribution. |
25 | * |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
33 | * SOFTWARE. |
34 | */ |
35 | |
36 | #include <linux/sched.h> |
37 | #include <linux/pci.h> |
38 | #include <linux/errno.h> |
39 | #include <linux/kernel.h> |
40 | #include <linux/io.h> |
41 | #include <linux/slab.h> |
42 | #include <linux/mlx4/cmd.h> |
43 | #include <linux/mlx4/qp.h> |
44 | #include <linux/if_ether.h> |
45 | #include <linux/etherdevice.h> |
46 | |
47 | #include "mlx4.h" |
48 | #include "fw.h" |
49 | #include "mlx4_stats.h" |
50 | |
51 | #define MLX4_MAC_VALID (1ull << 63) |
52 | #define MLX4_PF_COUNTERS_PER_PORT 2 |
53 | #define MLX4_VF_COUNTERS_PER_PORT 1 |
54 | |
55 | struct mac_res { |
56 | struct list_head list; |
57 | u64 mac; |
58 | int ref_count; |
59 | u8 smac_index; |
60 | u8 port; |
61 | }; |
62 | |
63 | struct vlan_res { |
64 | struct list_head list; |
65 | u16 vlan; |
66 | int ref_count; |
67 | int vlan_index; |
68 | u8 port; |
69 | }; |
70 | |
71 | struct res_common { |
72 | struct list_head list; |
73 | struct rb_node node; |
74 | u64 res_id; |
75 | int owner; |
76 | int state; |
77 | int from_state; |
78 | int to_state; |
79 | int removing; |
80 | const char *func_name; |
81 | }; |
82 | |
83 | enum { |
84 | RES_ANY_BUSY = 1 |
85 | }; |
86 | |
87 | struct res_gid { |
88 | struct list_head list; |
89 | u8 gid[16]; |
90 | enum mlx4_protocol prot; |
91 | enum mlx4_steer_type steer; |
92 | u64 reg_id; |
93 | }; |
94 | |
95 | enum res_qp_states { |
96 | RES_QP_BUSY = RES_ANY_BUSY, |
97 | |
98 | /* QP number was allocated */ |
99 | RES_QP_RESERVED, |
100 | |
101 | /* ICM memory for QP context was mapped */ |
102 | RES_QP_MAPPED, |
103 | |
104 | /* QP is in hw ownership */ |
105 | RES_QP_HW |
106 | }; |
107 | |
108 | struct res_qp { |
109 | struct res_common com; |
110 | struct res_mtt *mtt; |
111 | struct res_cq *rcq; |
112 | struct res_cq *scq; |
113 | struct res_srq *srq; |
114 | struct list_head mcg_list; |
115 | spinlock_t mcg_spl; |
116 | int local_qpn; |
117 | atomic_t ref_count; |
118 | u32 qpc_flags; |
119 | /* saved qp params before VST enforcement in order to restore on VGT */ |
120 | u8 sched_queue; |
121 | __be32 param3; |
122 | u8 vlan_control; |
123 | u8 fvl_rx; |
124 | u8 pri_path_fl; |
125 | u8 vlan_index; |
126 | u8 feup; |
127 | }; |
128 | |
129 | enum res_mtt_states { |
130 | RES_MTT_BUSY = RES_ANY_BUSY, |
131 | RES_MTT_ALLOCATED, |
132 | }; |
133 | |
134 | static inline const char *mtt_states_str(enum res_mtt_states state) |
135 | { |
136 | switch (state) { |
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
140 | } |
141 | } |
142 | |
143 | struct res_mtt { |
144 | struct res_common com; |
145 | int order; |
146 | atomic_t ref_count; |
147 | }; |
148 | |
149 | enum res_mpt_states { |
150 | RES_MPT_BUSY = RES_ANY_BUSY, |
151 | RES_MPT_RESERVED, |
152 | RES_MPT_MAPPED, |
153 | RES_MPT_HW, |
154 | }; |
155 | |
156 | struct res_mpt { |
157 | struct res_common com; |
158 | struct res_mtt *mtt; |
159 | int key; |
160 | }; |
161 | |
162 | enum res_eq_states { |
163 | RES_EQ_BUSY = RES_ANY_BUSY, |
164 | RES_EQ_RESERVED, |
165 | RES_EQ_HW, |
166 | }; |
167 | |
168 | struct res_eq { |
169 | struct res_common com; |
170 | struct res_mtt *mtt; |
171 | }; |
172 | |
173 | enum res_cq_states { |
174 | RES_CQ_BUSY = RES_ANY_BUSY, |
175 | RES_CQ_ALLOCATED, |
176 | RES_CQ_HW, |
177 | }; |
178 | |
179 | struct res_cq { |
180 | struct res_common com; |
181 | struct res_mtt *mtt; |
182 | atomic_t ref_count; |
183 | }; |
184 | |
185 | enum res_srq_states { |
186 | RES_SRQ_BUSY = RES_ANY_BUSY, |
187 | RES_SRQ_ALLOCATED, |
188 | RES_SRQ_HW, |
189 | }; |
190 | |
191 | struct res_srq { |
192 | struct res_common com; |
193 | struct res_mtt *mtt; |
194 | struct res_cq *cq; |
195 | atomic_t ref_count; |
196 | }; |
197 | |
198 | enum res_counter_states { |
199 | RES_COUNTER_BUSY = RES_ANY_BUSY, |
200 | RES_COUNTER_ALLOCATED, |
201 | }; |
202 | |
203 | struct res_counter { |
204 | struct res_common com; |
205 | int port; |
206 | }; |
207 | |
208 | enum res_xrcdn_states { |
209 | RES_XRCD_BUSY = RES_ANY_BUSY, |
210 | RES_XRCD_ALLOCATED, |
211 | }; |
212 | |
213 | struct res_xrcdn { |
214 | struct res_common com; |
215 | int port; |
216 | }; |
217 | |
218 | enum res_fs_rule_states { |
219 | RES_FS_RULE_BUSY = RES_ANY_BUSY, |
220 | RES_FS_RULE_ALLOCATED, |
221 | }; |
222 | |
223 | struct res_fs_rule { |
224 | struct res_common com; |
225 | int qpn; |
226 | /* VF DMFS mbox with port flipped */ |
227 | void *mirr_mbox; |
228 | /* > 0 --> apply mirror when getting into HA mode */ |
229 | /* = 0 --> un-apply mirror when getting out of HA mode */ |
230 | u32 mirr_mbox_size; |
231 | struct list_head mirr_list; |
232 | u64 mirr_rule_id; |
233 | }; |
234 | |
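/* Resources are tracked in one rb-tree per resource type, keyed by
 * res_id and protected by the tracker lock (mlx4_tlock(dev)).
 */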
235 | static void *res_tracker_lookup(struct rb_root *root, u64 res_id) |
236 | { |
237 | struct rb_node *node = root->rb_node; |
238 | |
239 | while (node) { |
240 | struct res_common *res = rb_entry(node, struct res_common, |
241 | node); |
242 | |
243 | if (res_id < res->res_id) |
244 | node = node->rb_left; |
245 | else if (res_id > res->res_id) |
246 | node = node->rb_right; |
247 | else |
248 | return res; |
249 | } |
250 | return NULL; |
251 | } |
252 | |
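/* Insert a resource into its type's rb-tree; returns -EEXIST if an
 * entry with the same res_id is already present.
 */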
253 | static int res_tracker_insert(struct rb_root *root, struct res_common *res) |
254 | { |
255 | struct rb_node **new = &(root->rb_node), *parent = NULL; |
256 | |
257 | /* Figure out where to put new node */ |
258 | while (*new) { |
259 | struct res_common *this = rb_entry(*new, struct res_common, |
260 | node); |
261 | |
262 | parent = *new; |
263 | if (res->res_id < this->res_id) |
264 | new = &((*new)->rb_left); |
265 | else if (res->res_id > this->res_id) |
266 | new = &((*new)->rb_right); |
267 | else |
268 | return -EEXIST; |
269 | } |
270 | |
271 | /* Add new node and rebalance tree. */ |
	rb_link_node(&res->node, parent, new);
273 | rb_insert_color(&res->node, root); |
274 | |
275 | return 0; |
276 | } |
277 | |
278 | enum qp_transition { |
279 | QP_TRANS_INIT2RTR, |
280 | QP_TRANS_RTR2RTS, |
281 | QP_TRANS_RTS2RTS, |
282 | QP_TRANS_SQERR2RTS, |
283 | QP_TRANS_SQD2SQD, |
284 | QP_TRANS_SQD2RTS |
285 | }; |
286 | |
287 | /* For Debug uses */ |
288 | static const char *resource_str(enum mlx4_resource rt) |
289 | { |
290 | switch (rt) { |
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
303 | } |
304 | } |
305 | |
306 | static void rem_slave_vlans(struct mlx4_dev *dev, int slave); |
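/* Try to grant 'count' instances of resource 'res_type' to a slave.
 * Each slave has a guaranteed minimum and a quota (hard cap); anything
 * above the guarantee is taken from the shared free pool, but only
 * while that pool stays at or above the amount still reserved for the
 * other slaves' guarantees.  MAC and VLAN resources are accounted per
 * port (port > 0); all other types are accounted per device (port == 0).
 */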
307 | static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, |
308 | enum mlx4_resource res_type, int count, |
309 | int port) |
310 | { |
311 | struct mlx4_priv *priv = mlx4_priv(dev); |
312 | struct resource_allocator *res_alloc = |
313 | &priv->mfunc.master.res_tracker.res_alloc[res_type]; |
314 | int err = -EDQUOT; |
315 | int allocated, free, reserved, guaranteed, from_free; |
316 | int from_rsvd; |
317 | |
318 | if (slave > dev->persist->num_vfs) |
319 | return -EINVAL; |
320 | |
	spin_lock(&res_alloc->alloc_lock);
322 | allocated = (port > 0) ? |
323 | res_alloc->allocated[(port - 1) * |
324 | (dev->persist->num_vfs + 1) + slave] : |
325 | res_alloc->allocated[slave]; |
326 | free = (port > 0) ? res_alloc->res_port_free[port - 1] : |
327 | res_alloc->res_free; |
328 | reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] : |
329 | res_alloc->res_reserved; |
330 | guaranteed = res_alloc->guaranteed[slave]; |
331 | |
332 | if (allocated + count > res_alloc->quota[slave]) { |
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
334 | slave, port, resource_str(res_type), count, |
335 | allocated, res_alloc->quota[slave]); |
336 | goto out; |
337 | } |
338 | |
339 | if (allocated + count <= guaranteed) { |
340 | err = 0; |
341 | from_rsvd = count; |
342 | } else { |
343 | /* portion may need to be obtained from free area */ |
344 | if (guaranteed - allocated > 0) |
345 | from_free = count - (guaranteed - allocated); |
346 | else |
347 | from_free = count; |
348 | |
349 | from_rsvd = count - from_free; |
350 | |
351 | if (free - from_free >= reserved) |
352 | err = 0; |
353 | else |
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
355 | slave, port, resource_str(res_type), free, |
356 | from_free, reserved); |
357 | } |
358 | |
359 | if (!err) { |
360 | /* grant the request */ |
361 | if (port > 0) { |
362 | res_alloc->allocated[(port - 1) * |
363 | (dev->persist->num_vfs + 1) + slave] += count; |
364 | res_alloc->res_port_free[port - 1] -= count; |
365 | res_alloc->res_port_rsvd[port - 1] -= from_rsvd; |
366 | } else { |
367 | res_alloc->allocated[slave] += count; |
368 | res_alloc->res_free -= count; |
369 | res_alloc->res_reserved -= from_rsvd; |
370 | } |
371 | } |
372 | |
373 | out: |
	spin_unlock(&res_alloc->alloc_lock);
375 | return err; |
376 | } |
377 | |
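/* Return 'count' instances to the allocator, crediting back to the
 * reserved area whatever portion the slave had consumed out of its
 * guarantee.
 */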
378 | static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, |
379 | enum mlx4_resource res_type, int count, |
380 | int port) |
381 | { |
382 | struct mlx4_priv *priv = mlx4_priv(dev); |
383 | struct resource_allocator *res_alloc = |
384 | &priv->mfunc.master.res_tracker.res_alloc[res_type]; |
385 | int allocated, guaranteed, from_rsvd; |
386 | |
387 | if (slave > dev->persist->num_vfs) |
388 | return; |
389 | |
	spin_lock(&res_alloc->alloc_lock);
391 | |
392 | allocated = (port > 0) ? |
393 | res_alloc->allocated[(port - 1) * |
394 | (dev->persist->num_vfs + 1) + slave] : |
395 | res_alloc->allocated[slave]; |
396 | guaranteed = res_alloc->guaranteed[slave]; |
397 | |
398 | if (allocated - count >= guaranteed) { |
399 | from_rsvd = 0; |
400 | } else { |
401 | /* portion may need to be returned to reserved area */ |
402 | if (allocated - guaranteed > 0) |
403 | from_rsvd = count - (allocated - guaranteed); |
404 | else |
405 | from_rsvd = count; |
406 | } |
407 | |
408 | if (port > 0) { |
409 | res_alloc->allocated[(port - 1) * |
410 | (dev->persist->num_vfs + 1) + slave] -= count; |
411 | res_alloc->res_port_free[port - 1] += count; |
412 | res_alloc->res_port_rsvd[port - 1] += from_rsvd; |
413 | } else { |
414 | res_alloc->allocated[slave] -= count; |
415 | res_alloc->res_free += count; |
416 | res_alloc->res_reserved += from_rsvd; |
417 | } |
418 | |
	spin_unlock(&res_alloc->alloc_lock);
421 | } |
422 | |
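/* Default split of a resource pool: every function (PF and VFs alike)
 * is guaranteed num_instances / (2 * (num_vfs + 1)) and may allocate up
 * to half the pool plus its guarantee.  The PF additionally keeps the
 * firmware-reserved MTTs for itself.
 */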
423 | static inline void initialize_res_quotas(struct mlx4_dev *dev, |
424 | struct resource_allocator *res_alloc, |
425 | enum mlx4_resource res_type, |
426 | int vf, int num_instances) |
427 | { |
428 | res_alloc->guaranteed[vf] = num_instances / |
429 | (2 * (dev->persist->num_vfs + 1)); |
430 | res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; |
431 | if (vf == mlx4_master_func_num(dev)) { |
432 | res_alloc->res_free = num_instances; |
433 | if (res_type == RES_MTT) { |
434 | /* reserved mtts will be taken out of the PF allocation */ |
435 | res_alloc->res_free += dev->caps.reserved_mtts; |
436 | res_alloc->guaranteed[vf] += dev->caps.reserved_mtts; |
437 | res_alloc->quota[vf] += dev->caps.reserved_mtts; |
438 | } |
439 | } |
440 | } |
441 | |
442 | void mlx4_init_quotas(struct mlx4_dev *dev) |
443 | { |
444 | struct mlx4_priv *priv = mlx4_priv(dev); |
445 | int pf; |
446 | |
447 | /* quotas for VFs are initialized in mlx4_slave_cap */ |
448 | if (mlx4_is_slave(dev)) |
449 | return; |
450 | |
451 | if (!mlx4_is_mfunc(dev)) { |
452 | dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - |
453 | mlx4_num_reserved_sqps(dev); |
454 | dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs; |
455 | dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs; |
456 | dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts; |
457 | dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws; |
458 | return; |
459 | } |
460 | |
461 | pf = mlx4_master_func_num(dev); |
462 | dev->quotas.qp = |
463 | priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf]; |
464 | dev->quotas.cq = |
465 | priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf]; |
466 | dev->quotas.srq = |
467 | priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf]; |
468 | dev->quotas.mtt = |
469 | priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf]; |
470 | dev->quotas.mpt = |
471 | priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; |
472 | } |
473 | |
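/* Counters are scarce: the PF is guaranteed two per physical port and a
 * VF one per active port, but a VF gets no guarantee at all if granting
 * it would exhaust the pool (one counter is always kept as the sink).
 */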
474 | static int |
475 | mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev, |
476 | struct resource_allocator *res_alloc, |
477 | int vf) |
478 | { |
479 | struct mlx4_active_ports actv_ports; |
480 | int ports, counters_guaranteed; |
481 | |
482 | /* For master, only allocate according to the number of phys ports */ |
483 | if (vf == mlx4_master_func_num(dev)) |
484 | return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports; |
485 | |
486 | /* calculate real number of ports for the VF */ |
	actv_ports = mlx4_get_active_ports(dev, vf);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
489 | counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT; |
490 | |
491 | /* If we do not have enough counters for this VF, do not |
492 | * allocate any for it. '-1' to reduce the sink counter. |
493 | */ |
494 | if ((res_alloc->res_reserved + counters_guaranteed) > |
495 | (dev->caps.max_counters - 1)) |
496 | return 0; |
497 | |
498 | return counters_guaranteed; |
499 | } |
500 | |
501 | int mlx4_init_resource_tracker(struct mlx4_dev *dev) |
502 | { |
503 | struct mlx4_priv *priv = mlx4_priv(dev); |
504 | int i, j; |
505 | int t; |
506 | |
507 | priv->mfunc.master.res_tracker.slave_list = |
		kcalloc(dev->num_slaves, sizeof(struct slave_list),
			GFP_KERNEL);
510 | if (!priv->mfunc.master.res_tracker.slave_list) |
511 | return -ENOMEM; |
512 | |
513 | for (i = 0 ; i < dev->num_slaves; i++) { |
514 | for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t) |
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
517 | mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex); |
518 | } |
519 | |
	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
521 | dev->num_slaves); |
522 | for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) |
523 | priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; |
524 | |
525 | for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { |
526 | struct resource_allocator *res_alloc = |
527 | &priv->mfunc.master.res_tracker.res_alloc[i]; |
		res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
						 sizeof(int),
						 GFP_KERNEL);
		res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
						      sizeof(int),
						      GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated =
				kcalloc(MLX4_MAX_PORTS *
					(dev->persist->num_vfs + 1),
					sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated =
				kcalloc(dev->persist->num_vfs + 1,
					sizeof(int), GFP_KERNEL);
543 | /* Reduce the sink counter */ |
544 | if (i == RES_COUNTER) |
545 | res_alloc->res_free = dev->caps.max_counters - 1; |
546 | |
547 | if (!res_alloc->quota || !res_alloc->guaranteed || |
548 | !res_alloc->allocated) |
549 | goto no_mem_err; |
550 | |
551 | spin_lock_init(&res_alloc->alloc_lock); |
552 | for (t = 0; t < dev->persist->num_vfs + 1; t++) { |
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
555 | switch (i) { |
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
582 | case RES_MAC: |
583 | if (t == mlx4_master_func_num(dev)) { |
584 | int max_vfs_pport = 0; |
585 | /* Calculate the max vfs per port for */ |
586 | /* both ports. */ |
587 | for (j = 0; j < dev->caps.num_ports; |
588 | j++) { |
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
591 | unsigned current_slaves = |
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
594 | if (max_vfs_pport < current_slaves) |
595 | max_vfs_pport = |
596 | current_slaves; |
597 | } |
598 | res_alloc->quota[t] = |
599 | MLX4_MAX_MAC_NUM - |
600 | 2 * max_vfs_pport; |
601 | res_alloc->guaranteed[t] = 2; |
602 | for (j = 0; j < MLX4_MAX_PORTS; j++) |
603 | res_alloc->res_port_free[j] = |
604 | MLX4_MAX_MAC_NUM; |
605 | } else { |
606 | res_alloc->quota[t] = MLX4_MAX_MAC_NUM; |
607 | res_alloc->guaranteed[t] = 2; |
608 | } |
609 | break; |
610 | case RES_VLAN: |
611 | if (t == mlx4_master_func_num(dev)) { |
612 | res_alloc->quota[t] = MLX4_MAX_VLAN_NUM; |
613 | res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2; |
614 | for (j = 0; j < MLX4_MAX_PORTS; j++) |
615 | res_alloc->res_port_free[j] = |
616 | res_alloc->quota[t]; |
617 | } else { |
618 | res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2; |
619 | res_alloc->guaranteed[t] = 0; |
620 | } |
621 | break; |
622 | case RES_COUNTER: |
623 | res_alloc->quota[t] = dev->caps.max_counters; |
624 | res_alloc->guaranteed[t] = |
					mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
626 | break; |
627 | default: |
628 | break; |
629 | } |
630 | if (i == RES_MAC || i == RES_VLAN) { |
631 | for (j = 0; j < dev->caps.num_ports; j++) |
632 | if (test_bit(j, actv_ports.ports)) |
633 | res_alloc->res_port_rsvd[j] += |
634 | res_alloc->guaranteed[t]; |
635 | } else { |
636 | res_alloc->res_reserved += res_alloc->guaranteed[t]; |
637 | } |
638 | } |
639 | } |
640 | spin_lock_init(&priv->mfunc.master.res_tracker.lock); |
641 | return 0; |
642 | |
643 | no_mem_err: |
644 | for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { |
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
651 | } |
652 | return -ENOMEM; |
653 | } |
654 | |
655 | void mlx4_free_resource_tracker(struct mlx4_dev *dev, |
656 | enum mlx4_res_tracker_free_type type) |
657 | { |
658 | struct mlx4_priv *priv = mlx4_priv(dev); |
659 | int i; |
660 | |
661 | if (priv->mfunc.master.res_tracker.slave_list) { |
662 | if (type != RES_TR_FREE_STRUCTS_ONLY) { |
663 | for (i = 0; i < dev->num_slaves; i++) { |
664 | if (type == RES_TR_FREE_ALL || |
665 | dev->caps.function != i) |
					mlx4_delete_all_resources_for_slave(dev, i);
667 | } |
668 | /* free master's vlans */ |
669 | i = dev->caps.function; |
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
674 | } |
675 | |
676 | if (type != RES_TR_FREE_SLAVES_ONLY) { |
677 | for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { |
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
684 | } |
			kfree(priv->mfunc.master.res_tracker.slave_list);
686 | priv->mfunc.master.res_tracker.slave_list = NULL; |
687 | } |
688 | } |
689 | } |
690 | |
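/* Paravirtualize the pkey index of a wrapped QP command: byte 35 of the
 * mailbox holds the index, and bit 6 of the sched_queue byte (offset 64)
 * selects the port used for the slave's virt-to-phys pkey translation.
 */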
691 | static void update_pkey_index(struct mlx4_dev *dev, int slave, |
692 | struct mlx4_cmd_mailbox *inbox) |
693 | { |
694 | u8 sched = *(u8 *)(inbox->buf + 64); |
695 | u8 orig_index = *(u8 *)(inbox->buf + 35); |
696 | u8 new_index; |
697 | struct mlx4_priv *priv = mlx4_priv(dev); |
698 | int port; |
699 | |
700 | port = (sched >> 6 & 1) + 1; |
701 | |
702 | new_index = priv->virt2phys_pkey[slave][port - 1][orig_index]; |
703 | *(u8 *)(inbox->buf + 35) = new_index; |
704 | } |
705 | |
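/* Fix up the mgid (GID table) index in a wrapped QP context so each
 * slave addresses its own slice of the GID table: UD QPs are always
 * rewritten, while RC/UC/XRC QPs are adjusted only when the primary or
 * alternate address path is actually being modified (per optpar).
 */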
706 | static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, |
707 | u8 slave) |
708 | { |
709 | struct mlx4_qp_context *qp_ctx = inbox->buf + 8; |
710 | enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); |
711 | u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; |
712 | int port; |
713 | |
714 | if (MLX4_QP_ST_UD == ts) { |
715 | port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; |
716 | if (mlx4_is_eth(dev, port)) |
717 | qp_ctx->pri_path.mgid_index = |
718 | mlx4_get_base_gid_ix(dev, slave, port) | 0x80; |
719 | else |
720 | qp_ctx->pri_path.mgid_index = slave | 0x80; |
721 | |
722 | } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) { |
723 | if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { |
724 | port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; |
725 | if (mlx4_is_eth(dev, port)) { |
726 | qp_ctx->pri_path.mgid_index += |
727 | mlx4_get_base_gid_ix(dev, slave, port); |
728 | qp_ctx->pri_path.mgid_index &= 0x7f; |
729 | } else { |
730 | qp_ctx->pri_path.mgid_index = slave & 0x7F; |
731 | } |
732 | } |
733 | if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { |
734 | port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; |
735 | if (mlx4_is_eth(dev, port)) { |
736 | qp_ctx->alt_path.mgid_index += |
737 | mlx4_get_base_gid_ix(dev, slave, port); |
738 | qp_ctx->alt_path.mgid_index &= 0x7f; |
739 | } else { |
740 | qp_ctx->alt_path.mgid_index = slave & 0x7F; |
741 | } |
742 | } |
743 | } |
744 | } |
745 | |
746 | static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc, |
747 | u8 slave, int port); |
748 | |
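/* Apply the VF's operational state to a QP context being moved to RTR:
 * attach a counter, and when the VF is in VST mode enforce vlan
 * stripping/insertion, the vlan_control filters, the default QoS and,
 * if spoof-checking is enabled, the forced source MAC index.  Reserved
 * QPs (special, proxy, tunnel) are left untouched.
 */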
749 | static int update_vport_qp_param(struct mlx4_dev *dev, |
750 | struct mlx4_cmd_mailbox *inbox, |
751 | u8 slave, u32 qpn) |
752 | { |
753 | struct mlx4_qp_context *qpc = inbox->buf + 8; |
754 | struct mlx4_vport_oper_state *vp_oper; |
755 | struct mlx4_priv *priv; |
756 | u32 qp_type; |
757 | int port, err = 0; |
758 | |
759 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; |
760 | priv = mlx4_priv(dev); |
761 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; |
762 | qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; |
763 | |
764 | err = handle_counter(dev, qpc, slave, port); |
765 | if (err) |
766 | goto out; |
767 | |
768 | if (MLX4_VGT != vp_oper->state.default_vlan) { |
769 | /* the reserved QPs (special, proxy, tunnel) |
770 | * do not operate over vlans |
771 | */ |
772 | if (mlx4_is_qp_reserved(dev, qpn)) |
773 | return 0; |
774 | |
		/* force vlan stripping by clearing vsd; an MLX QP here refers to Raw Ethernet */
776 | if (qp_type == MLX4_QP_ST_UD || |
777 | (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) { |
778 | if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) { |
779 | *(__be32 *)inbox->buf = |
780 | cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | |
781 | MLX4_QP_OPTPAR_VLAN_STRIPPING); |
782 | qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); |
783 | } else { |
784 | struct mlx4_update_qp_params params = {.flags = 0}; |
785 | |
				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
787 | if (err) |
788 | goto out; |
789 | } |
790 | } |
791 | |
792 | /* preserve IF_COUNTER flag */ |
793 | qpc->pri_path.vlan_control &= |
794 | MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER; |
795 | if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && |
796 | dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { |
797 | qpc->pri_path.vlan_control |= |
798 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | |
799 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | |
800 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | |
801 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | |
802 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | |
803 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; |
804 | } else if (0 != vp_oper->state.default_vlan) { |
805 | if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) { |
				/* vst QinQ should block untagged on TX,
				 * but cvlan is in the payload and phv is set,
				 * so hw sees it as untagged. Block tagged
				 * instead.
				 */
810 | qpc->pri_path.vlan_control |= |
811 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | |
812 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | |
813 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | |
814 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; |
815 | } else { /* vst 802.1Q */ |
816 | qpc->pri_path.vlan_control |= |
817 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | |
818 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | |
819 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; |
820 | } |
821 | } else { /* priority tagged */ |
822 | qpc->pri_path.vlan_control |= |
823 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | |
824 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; |
825 | } |
826 | |
827 | qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; |
828 | qpc->pri_path.vlan_index = vp_oper->vlan_idx; |
829 | qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN; |
830 | if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) |
831 | qpc->pri_path.fl |= MLX4_FL_SV; |
832 | else |
833 | qpc->pri_path.fl |= MLX4_FL_CV; |
834 | qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; |
835 | qpc->pri_path.sched_queue &= 0xC7; |
836 | qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; |
837 | qpc->qos_vport = vp_oper->state.qos_vport; |
838 | } |
839 | if (vp_oper->state.spoofchk) { |
840 | qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; |
841 | qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; |
842 | } |
843 | out: |
844 | return err; |
845 | } |
846 | |
847 | static int mpt_mask(struct mlx4_dev *dev) |
848 | { |
849 | return dev->caps.num_mpts - 1; |
850 | } |
851 | |
852 | static const char *mlx4_resource_type_to_str(enum mlx4_resource t) |
853 | { |
854 | switch (t) { |
	case RES_QP:
		return "QP";
	case RES_CQ:
		return "CQ";
	case RES_SRQ:
		return "SRQ";
	case RES_XRCD:
		return "XRCD";
	case RES_MPT:
		return "MPT";
	case RES_MTT:
		return "MTT";
	case RES_MAC:
		return "MAC";
	case RES_VLAN:
		return "VLAN";
	case RES_COUNTER:
		return "COUNTER";
	case RES_FS_RULE:
		return "FS_RULE";
	case RES_EQ:
		return "EQ";
	default:
		return "INVALID RESOURCE";
879 | } |
880 | } |
881 | |
882 | static void *find_res(struct mlx4_dev *dev, u64 res_id, |
883 | enum mlx4_resource type) |
884 | { |
885 | struct mlx4_priv *priv = mlx4_priv(dev); |
886 | |
	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
889 | } |
890 | |
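/* Take exclusive hold of a tracked resource: on success the entry is
 * parked in RES_ANY_BUSY (func_name records the taker, for debugging)
 * and must be released with put_res().  Fails if the resource does not
 * exist, is already busy, or is owned by a different slave.
 */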
891 | static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id, |
892 | enum mlx4_resource type, |
893 | void *res, const char *func_name) |
894 | { |
895 | struct res_common *r; |
896 | int err = 0; |
897 | |
	spin_lock_irq(mlx4_tlock(dev));
899 | r = find_res(dev, res_id, type); |
900 | if (!r) { |
901 | err = -ENONET; |
902 | goto exit; |
903 | } |
904 | |
905 | if (r->state == RES_ANY_BUSY) { |
906 | mlx4_warn(dev, |
			  "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
908 | func_name, slave, res_id, mlx4_resource_type_to_str(type), |
909 | r->func_name); |
910 | err = -EBUSY; |
911 | goto exit; |
912 | } |
913 | |
914 | if (r->owner != slave) { |
915 | err = -EPERM; |
916 | goto exit; |
917 | } |
918 | |
919 | r->from_state = r->state; |
920 | r->state = RES_ANY_BUSY; |
921 | r->func_name = func_name; |
922 | |
923 | if (res) |
924 | *((struct res_common **)res) = r; |
925 | |
926 | exit: |
	spin_unlock_irq(mlx4_tlock(dev));
928 | return err; |
929 | } |
930 | |
931 | #define get_res(dev, slave, res_id, type, res) \ |
932 | _get_res((dev), (slave), (res_id), (type), (res), __func__) |
933 | |
934 | int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, |
935 | enum mlx4_resource type, |
936 | u64 res_id, int *slave) |
937 | { |
938 | |
939 | struct res_common *r; |
940 | int err = -ENOENT; |
941 | int id = res_id; |
942 | |
943 | if (type == RES_QP) |
944 | id &= 0x7fffff; |
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
948 | if (r) { |
949 | *slave = r->owner; |
950 | err = 0; |
951 | } |
	spin_unlock(mlx4_tlock(dev));
953 | |
954 | return err; |
955 | } |
956 | |
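/* Release a hold taken via get_res(): restore the pre-busy state. */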
957 | static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, |
958 | enum mlx4_resource type) |
959 | { |
960 | struct res_common *r; |
961 | |
	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r) {
		r->state = r->from_state;
		r->func_name = "";
	}
	spin_unlock_irq(mlx4_tlock(dev));
969 | } |
970 | |
971 | static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
972 | u64 in_param, u64 *out_param, int port); |
973 | |
974 | static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port, |
975 | int counter_index) |
976 | { |
977 | struct res_common *r; |
978 | struct res_counter *counter; |
979 | int ret = 0; |
980 | |
981 | if (counter_index == MLX4_SINK_COUNTER_INDEX(dev)) |
982 | return ret; |
983 | |
	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
986 | if (!r || r->owner != slave) { |
987 | ret = -EINVAL; |
988 | } else { |
989 | counter = container_of(r, struct res_counter, com); |
990 | if (!counter->port) |
991 | counter->port = port; |
992 | } |
993 | |
	spin_unlock_irq(mlx4_tlock(dev));
995 | return ret; |
996 | } |
997 | |
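/* A QP created with the sink counter index needs a real counter: reuse
 * one this slave already owns on the same port, or try to reserve a
 * fresh one.  If no counter can be had, the QP keeps the sink index.
 */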
998 | static int handle_unexisting_counter(struct mlx4_dev *dev, |
999 | struct mlx4_qp_context *qpc, u8 slave, |
1000 | int port) |
1001 | { |
1002 | struct mlx4_priv *priv = mlx4_priv(dev); |
1003 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1004 | struct res_common *tmp; |
1005 | struct res_counter *counter; |
1006 | u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev); |
1007 | int err = 0; |
1008 | |
	spin_lock_irq(mlx4_tlock(dev));
1010 | list_for_each_entry(tmp, |
1011 | &tracker->slave_list[slave].res_list[RES_COUNTER], |
1012 | list) { |
1013 | counter = container_of(tmp, struct res_counter, com); |
1014 | if (port == counter->port) { |
1015 | qpc->pri_path.counter_index = counter->com.res_id; |
			spin_unlock_irq(mlx4_tlock(dev));
1017 | return 0; |
1018 | } |
1019 | } |
	spin_unlock_irq(mlx4_tlock(dev));
1021 | |
1022 | /* No existing counter, need to allocate a new counter */ |
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
1025 | if (err == -ENOENT) { |
1026 | err = 0; |
1027 | } else if (err && err != -ENOSPC) { |
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
1029 | __func__, slave, err); |
1030 | } else { |
1031 | qpc->pri_path.counter_index = counter_idx; |
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
1033 | __func__, slave, qpc->pri_path.counter_index); |
1034 | err = 0; |
1035 | } |
1036 | |
1037 | return err; |
1038 | } |
1039 | |
1040 | static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc, |
1041 | u8 slave, int port) |
1042 | { |
1043 | if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev)) |
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);
1046 | |
1047 | return handle_unexisting_counter(dev, qpc, slave, port); |
1048 | } |
1049 | |
1050 | static struct res_common *alloc_qp_tr(int id) |
1051 | { |
1052 | struct res_qp *ret; |
1053 | |
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1055 | if (!ret) |
1056 | return NULL; |
1057 | |
1058 | ret->com.res_id = id; |
1059 | ret->com.state = RES_QP_RESERVED; |
1060 | ret->local_qpn = id; |
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);
1064 | |
1065 | return &ret->com; |
1066 | } |
1067 | |
1068 | static struct res_common *alloc_mtt_tr(int id, int order) |
1069 | { |
1070 | struct res_mtt *ret; |
1071 | |
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);
1080 | |
1081 | return &ret->com; |
1082 | } |
1083 | |
1084 | static struct res_common *alloc_mpt_tr(int id, int key) |
1085 | { |
1086 | struct res_mpt *ret; |
1087 | |
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1089 | if (!ret) |
1090 | return NULL; |
1091 | |
1092 | ret->com.res_id = id; |
1093 | ret->com.state = RES_MPT_RESERVED; |
1094 | ret->key = key; |
1095 | |
1096 | return &ret->com; |
1097 | } |
1098 | |
1099 | static struct res_common *alloc_eq_tr(int id) |
1100 | { |
1101 | struct res_eq *ret; |
1102 | |
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1104 | if (!ret) |
1105 | return NULL; |
1106 | |
1107 | ret->com.res_id = id; |
1108 | ret->com.state = RES_EQ_RESERVED; |
1109 | |
1110 | return &ret->com; |
1111 | } |
1112 | |
1113 | static struct res_common *alloc_cq_tr(int id) |
1114 | { |
1115 | struct res_cq *ret; |
1116 | |
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);
1124 | |
1125 | return &ret->com; |
1126 | } |
1127 | |
1128 | static struct res_common *alloc_srq_tr(int id) |
1129 | { |
1130 | struct res_srq *ret; |
1131 | |
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);
1139 | |
1140 | return &ret->com; |
1141 | } |
1142 | |
1143 | static struct res_common *alloc_counter_tr(int id, int port) |
1144 | { |
1145 | struct res_counter *ret; |
1146 | |
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1148 | if (!ret) |
1149 | return NULL; |
1150 | |
1151 | ret->com.res_id = id; |
1152 | ret->com.state = RES_COUNTER_ALLOCATED; |
1153 | ret->port = port; |
1154 | |
1155 | return &ret->com; |
1156 | } |
1157 | |
1158 | static struct res_common *alloc_xrcdn_tr(int id) |
1159 | { |
1160 | struct res_xrcdn *ret; |
1161 | |
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1163 | if (!ret) |
1164 | return NULL; |
1165 | |
1166 | ret->com.res_id = id; |
1167 | ret->com.state = RES_XRCD_ALLOCATED; |
1168 | |
1169 | return &ret->com; |
1170 | } |
1171 | |
1172 | static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) |
1173 | { |
1174 | struct res_fs_rule *ret; |
1175 | |
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1177 | if (!ret) |
1178 | return NULL; |
1179 | |
1180 | ret->com.res_id = id; |
1181 | ret->com.state = RES_FS_RULE_ALLOCATED; |
1182 | ret->qpn = qpn; |
1183 | return &ret->com; |
1184 | } |
1185 | |
1186 | static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, |
				   int extra)
1188 | { |
1189 | struct res_common *ret; |
1190 | |
1191 | switch (type) { |
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
1224 | } |
1225 | if (ret) |
1226 | ret->owner = slave; |
1227 | |
1228 | return ret; |
1229 | } |
1230 | |
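/* Collect the statistics of every counter the VF owns on the given
 * port.  The counter ids are snapshotted under the tracker lock first,
 * since the firmware queries below must run without it.
 */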
1231 | int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port, |
1232 | struct mlx4_counter *data) |
1233 | { |
1234 | struct mlx4_priv *priv = mlx4_priv(dev); |
1235 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1236 | struct res_common *tmp; |
1237 | struct res_counter *counter; |
1238 | int *counters_arr; |
1239 | int i = 0, err = 0; |
1240 | |
1241 | memset(data, 0, sizeof(*data)); |
1242 | |
	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
1245 | if (!counters_arr) |
1246 | return -ENOMEM; |
1247 | |
	spin_lock_irq(mlx4_tlock(dev));
1249 | list_for_each_entry(tmp, |
1250 | &tracker->slave_list[slave].res_list[RES_COUNTER], |
1251 | list) { |
1252 | counter = container_of(tmp, struct res_counter, com); |
1253 | if (counter->port == port) { |
1254 | counters_arr[i] = (int)tmp->res_id; |
1255 | i++; |
1256 | } |
1257 | } |
	spin_unlock_irq(mlx4_tlock(dev));
1259 | counters_arr[i] = -1; |
1260 | |
1261 | i = 0; |
1262 | |
1263 | while (counters_arr[i] != -1) { |
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
1266 | if (err) { |
1267 | memset(data, 0, sizeof(*data)); |
1268 | goto table_changed; |
1269 | } |
1270 | i++; |
1271 | } |
1272 | |
1273 | table_changed: |
	kfree(counters_arr);
1275 | return 0; |
1276 | } |
1277 | |
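/* Register the range [base, base + count) of resources to a slave:
 * allocate tracker entries, insert them into the type's rb-tree and
 * link them on the slave's per-type list, all or nothing.
 */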
1278 | static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, |
			 enum mlx4_resource type, int extra)
1280 | { |
1281 | int i; |
1282 | int err; |
1283 | struct mlx4_priv *priv = mlx4_priv(dev); |
1284 | struct res_common **res_arr; |
1285 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1286 | struct rb_root *root = &tracker->res_tree[type]; |
1287 | |
	res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
1289 | if (!res_arr) |
1290 | return -ENOMEM; |
1291 | |
1292 | for (i = 0; i < count; ++i) { |
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
1299 | return -ENOMEM; |
1300 | } |
1301 | } |
1302 | |
	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
1306 | err = -EEXIST; |
1307 | goto undo; |
1308 | } |
		err = res_tracker_insert(root, res_arr[i]);
1310 | if (err) |
1311 | goto undo; |
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);
1317 | |
1318 | return 0; |
1319 | |
1320 | undo: |
1321 | for (--i; i >= 0; --i) { |
1322 | rb_erase(&res_arr[i]->node, root); |
		list_del_init(&res_arr[i]->list);
1324 | } |
1325 | |
	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);
1332 | |
1333 | return err; |
1334 | } |
1335 | |
1336 | static int remove_qp_ok(struct res_qp *res) |
1337 | { |
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
1342 | return -EBUSY; |
1343 | } else if (res->com.state != RES_QP_RESERVED) { |
1344 | return -EPERM; |
1345 | } |
1346 | |
1347 | return 0; |
1348 | } |
1349 | |
1350 | static int remove_mtt_ok(struct res_mtt *res, int order) |
1351 | { |
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
1358 | return -EBUSY; |
1359 | } else if (res->com.state != RES_MTT_ALLOCATED) |
1360 | return -EPERM; |
1361 | else if (res->order != order) |
1362 | return -EINVAL; |
1363 | |
1364 | return 0; |
1365 | } |
1366 | |
1367 | static int remove_mpt_ok(struct res_mpt *res) |
1368 | { |
1369 | if (res->com.state == RES_MPT_BUSY) |
1370 | return -EBUSY; |
1371 | else if (res->com.state != RES_MPT_RESERVED) |
1372 | return -EPERM; |
1373 | |
1374 | return 0; |
1375 | } |
1376 | |
1377 | static int remove_eq_ok(struct res_eq *res) |
1378 | { |
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
1382 | return -EPERM; |
1383 | |
1384 | return 0; |
1385 | } |
1386 | |
1387 | static int remove_counter_ok(struct res_counter *res) |
1388 | { |
1389 | if (res->com.state == RES_COUNTER_BUSY) |
1390 | return -EBUSY; |
1391 | else if (res->com.state != RES_COUNTER_ALLOCATED) |
1392 | return -EPERM; |
1393 | |
1394 | return 0; |
1395 | } |
1396 | |
1397 | static int remove_xrcdn_ok(struct res_xrcdn *res) |
1398 | { |
1399 | if (res->com.state == RES_XRCD_BUSY) |
1400 | return -EBUSY; |
1401 | else if (res->com.state != RES_XRCD_ALLOCATED) |
1402 | return -EPERM; |
1403 | |
1404 | return 0; |
1405 | } |
1406 | |
1407 | static int remove_fs_rule_ok(struct res_fs_rule *res) |
1408 | { |
1409 | if (res->com.state == RES_FS_RULE_BUSY) |
1410 | return -EBUSY; |
1411 | else if (res->com.state != RES_FS_RULE_ALLOCATED) |
1412 | return -EPERM; |
1413 | |
1414 | return 0; |
1415 | } |
1416 | |
1417 | static int remove_cq_ok(struct res_cq *res) |
1418 | { |
1419 | if (res->com.state == RES_CQ_BUSY) |
1420 | return -EBUSY; |
1421 | else if (res->com.state != RES_CQ_ALLOCATED) |
1422 | return -EPERM; |
1423 | |
1424 | return 0; |
1425 | } |
1426 | |
1427 | static int remove_srq_ok(struct res_srq *res) |
1428 | { |
1429 | if (res->com.state == RES_SRQ_BUSY) |
1430 | return -EBUSY; |
1431 | else if (res->com.state != RES_SRQ_ALLOCATED) |
1432 | return -EPERM; |
1433 | |
1434 | return 0; |
1435 | } |
1436 | |
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1438 | { |
1439 | switch (type) { |
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -EOPNOTSUPP;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
1462 | } |
1463 | } |
1464 | |
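/* Unregister a range of resources from a slave in two passes under the
 * tracker lock: first verify that every entry exists, belongs to the
 * slave and is in a removable state, then erase and free them all.
 */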
1465 | static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, |
			 enum mlx4_resource type, int extra)
1467 | { |
1468 | u64 i; |
1469 | int err; |
1470 | struct mlx4_priv *priv = mlx4_priv(dev); |
1471 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1472 | struct res_common *r; |
1473 | |
	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
1477 | if (!r) { |
1478 | err = -ENOENT; |
1479 | goto out; |
1480 | } |
1481 | if (r->owner != slave) { |
1482 | err = -EPERM; |
1483 | goto out; |
1484 | } |
		err = remove_ok(r, type, extra);
1486 | if (err) |
1487 | goto out; |
1488 | } |
1489 | |
1490 | for (i = base; i < base + count; ++i) { |
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
1495 | } |
1496 | err = 0; |
1497 | |
1498 | out: |
	spin_unlock_irq(mlx4_tlock(dev));
1500 | |
1501 | return err; |
1502 | } |
1503 | |
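/* Start a tracked QP state transition.  Legal moves follow the
 * RESERVED <-> MAPPED <-> HW ladder; on success the entry is held in
 * RES_QP_BUSY until res_end_move() commits the transition or
 * res_abort_move() rolls it back.
 */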
1504 | static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, |
1505 | enum res_qp_states state, struct res_qp **qp, |
1506 | int alloc) |
1507 | { |
1508 | struct mlx4_priv *priv = mlx4_priv(dev); |
1509 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1510 | struct res_qp *r; |
1511 | int err = 0; |
1512 | |
	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1515 | if (!r) |
1516 | err = -ENOENT; |
1517 | else if (r->com.owner != slave) |
1518 | err = -EPERM; |
1519 | else { |
1520 | switch (state) { |
1521 | case RES_QP_BUSY: |
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1523 | __func__, r->com.res_id); |
1524 | err = -EBUSY; |
1525 | break; |
1526 | |
1527 | case RES_QP_RESERVED: |
1528 | if (r->com.state == RES_QP_MAPPED && !alloc) |
1529 | break; |
1530 | |
			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1532 | err = -EINVAL; |
1533 | break; |
1534 | |
1535 | case RES_QP_MAPPED: |
1536 | if ((r->com.state == RES_QP_RESERVED && alloc) || |
1537 | r->com.state == RES_QP_HW) |
1538 | break; |
1539 | else { |
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1541 | r->com.res_id); |
1542 | err = -EINVAL; |
1543 | } |
1544 | |
1545 | break; |
1546 | |
1547 | case RES_QP_HW: |
1548 | if (r->com.state != RES_QP_MAPPED) |
1549 | err = -EINVAL; |
1550 | break; |
1551 | default: |
1552 | err = -EINVAL; |
1553 | } |
1554 | |
1555 | if (!err) { |
1556 | r->com.from_state = r->com.state; |
1557 | r->com.to_state = state; |
1558 | r->com.state = RES_QP_BUSY; |
1559 | if (qp) |
1560 | *qp = r; |
1561 | } |
1562 | } |
1563 | |
	spin_unlock_irq(mlx4_tlock(dev));
1565 | |
1566 | return err; |
1567 | } |
1568 | |
1569 | static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, |
1570 | enum res_mpt_states state, struct res_mpt **mpt) |
1571 | { |
1572 | struct mlx4_priv *priv = mlx4_priv(dev); |
1573 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1574 | struct res_mpt *r; |
1575 | int err = 0; |
1576 | |
	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1579 | if (!r) |
1580 | err = -ENOENT; |
1581 | else if (r->com.owner != slave) |
1582 | err = -EPERM; |
1583 | else { |
1584 | switch (state) { |
1585 | case RES_MPT_BUSY: |
1586 | err = -EINVAL; |
1587 | break; |
1588 | |
1589 | case RES_MPT_RESERVED: |
1590 | if (r->com.state != RES_MPT_MAPPED) |
1591 | err = -EINVAL; |
1592 | break; |
1593 | |
1594 | case RES_MPT_MAPPED: |
1595 | if (r->com.state != RES_MPT_RESERVED && |
1596 | r->com.state != RES_MPT_HW) |
1597 | err = -EINVAL; |
1598 | break; |
1599 | |
1600 | case RES_MPT_HW: |
1601 | if (r->com.state != RES_MPT_MAPPED) |
1602 | err = -EINVAL; |
1603 | break; |
1604 | default: |
1605 | err = -EINVAL; |
1606 | } |
1607 | |
1608 | if (!err) { |
1609 | r->com.from_state = r->com.state; |
1610 | r->com.to_state = state; |
1611 | r->com.state = RES_MPT_BUSY; |
1612 | if (mpt) |
1613 | *mpt = r; |
1614 | } |
1615 | } |
1616 | |
	spin_unlock_irq(mlx4_tlock(dev));
1618 | |
1619 | return err; |
1620 | } |
1621 | |
1622 | static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, |
1623 | enum res_eq_states state, struct res_eq **eq) |
1624 | { |
1625 | struct mlx4_priv *priv = mlx4_priv(dev); |
1626 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1627 | struct res_eq *r; |
1628 | int err = 0; |
1629 | |
	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1632 | if (!r) |
1633 | err = -ENOENT; |
1634 | else if (r->com.owner != slave) |
1635 | err = -EPERM; |
1636 | else { |
1637 | switch (state) { |
1638 | case RES_EQ_BUSY: |
1639 | err = -EINVAL; |
1640 | break; |
1641 | |
1642 | case RES_EQ_RESERVED: |
1643 | if (r->com.state != RES_EQ_HW) |
1644 | err = -EINVAL; |
1645 | break; |
1646 | |
1647 | case RES_EQ_HW: |
1648 | if (r->com.state != RES_EQ_RESERVED) |
1649 | err = -EINVAL; |
1650 | break; |
1651 | |
1652 | default: |
1653 | err = -EINVAL; |
1654 | } |
1655 | |
1656 | if (!err) { |
1657 | r->com.from_state = r->com.state; |
1658 | r->com.to_state = state; |
1659 | r->com.state = RES_EQ_BUSY; |
1660 | } |
1661 | } |
1662 | |
	spin_unlock_irq(mlx4_tlock(dev));
1664 | |
1665 | if (!err && eq) |
1666 | *eq = r; |
1667 | |
1668 | return err; |
1669 | } |
1670 | |
1671 | static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, |
1672 | enum res_cq_states state, struct res_cq **cq) |
1673 | { |
1674 | struct mlx4_priv *priv = mlx4_priv(dev); |
1675 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1676 | struct res_cq *r; |
1677 | int err; |
1678 | |
	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1681 | if (!r) { |
1682 | err = -ENOENT; |
1683 | } else if (r->com.owner != slave) { |
1684 | err = -EPERM; |
1685 | } else if (state == RES_CQ_ALLOCATED) { |
1686 | if (r->com.state != RES_CQ_HW) |
1687 | err = -EINVAL; |
		else if (atomic_read(&r->ref_count))
1689 | err = -EBUSY; |
1690 | else |
1691 | err = 0; |
1692 | } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) { |
1693 | err = -EINVAL; |
1694 | } else { |
1695 | err = 0; |
1696 | } |
1697 | |
1698 | if (!err) { |
1699 | r->com.from_state = r->com.state; |
1700 | r->com.to_state = state; |
1701 | r->com.state = RES_CQ_BUSY; |
1702 | if (cq) |
1703 | *cq = r; |
1704 | } |
1705 | |
	spin_unlock_irq(mlx4_tlock(dev));
1707 | |
1708 | return err; |
1709 | } |
1710 | |
1711 | static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, |
1712 | enum res_srq_states state, struct res_srq **srq) |
1713 | { |
1714 | struct mlx4_priv *priv = mlx4_priv(dev); |
1715 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1716 | struct res_srq *r; |
1717 | int err = 0; |
1718 | |
	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1721 | if (!r) { |
1722 | err = -ENOENT; |
1723 | } else if (r->com.owner != slave) { |
1724 | err = -EPERM; |
1725 | } else if (state == RES_SRQ_ALLOCATED) { |
1726 | if (r->com.state != RES_SRQ_HW) |
1727 | err = -EINVAL; |
		else if (atomic_read(&r->ref_count))
1729 | err = -EBUSY; |
1730 | } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) { |
1731 | err = -EINVAL; |
1732 | } |
1733 | |
1734 | if (!err) { |
1735 | r->com.from_state = r->com.state; |
1736 | r->com.to_state = state; |
1737 | r->com.state = RES_SRQ_BUSY; |
1738 | if (srq) |
1739 | *srq = r; |
1740 | } |
1741 | |
	spin_unlock_irq(mlx4_tlock(dev));
1743 | |
1744 | return err; |
1745 | } |
1746 | |
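/* Roll back (abort) or commit (end) a transition started by one of the
 * *_res_start_move_to() helpers, restoring from_state or applying
 * to_state respectively.
 */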
1747 | static void res_abort_move(struct mlx4_dev *dev, int slave, |
1748 | enum mlx4_resource type, int id) |
1749 | { |
1750 | struct mlx4_priv *priv = mlx4_priv(dev); |
1751 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1752 | struct res_common *r; |
1753 | |
	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
1756 | if (r && (r->owner == slave)) |
1757 | r->state = r->from_state; |
	spin_unlock_irq(mlx4_tlock(dev));
1759 | } |
1760 | |
1761 | static void res_end_move(struct mlx4_dev *dev, int slave, |
1762 | enum mlx4_resource type, int id) |
1763 | { |
1764 | struct mlx4_priv *priv = mlx4_priv(dev); |
1765 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
1766 | struct res_common *r; |
1767 | |
	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
1770 | if (r && (r->owner == slave)) |
1771 | r->state = r->to_state; |
	spin_unlock_irq(mlx4_tlock(dev));
1773 | } |
1774 | |
1775 | static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) |
1776 | { |
1777 | return mlx4_is_qp_reserved(dev, qpn) && |
1778 | (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn)); |
1779 | } |
1780 | |
1781 | static int fw_reserved(struct mlx4_dev *dev, int qpn) |
1782 | { |
1783 | return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; |
1784 | } |
1785 | |
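/* ALLOC_RES wrapper for QPs.  RES_OP_RESERVE reserves a QP number range
 * (count in the low 24 bits of in_param, alignment in the high dword,
 * allocation flags in bits 24-31 masked to what the slave may request);
 * RES_OP_MAP_ICM maps ICM for a single QPN and moves it to MAPPED.
 */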
1786 | static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
1787 | u64 in_param, u64 *out_param) |
1788 | { |
1789 | int err; |
1790 | int count; |
1791 | int align; |
1792 | int base; |
1793 | int qpn; |
1794 | u8 flags; |
1795 | |
1796 | switch (op) { |
1797 | case RES_OP_RESERVE: |
		count = get_param_l(&in_param) & 0xffffff;
1799 | /* Turn off all unsupported QP allocation flags that the |
1800 | * slave tries to set. |
1801 | */ |
		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1805 | if (err) |
1806 | return err; |
1807 | |
1808 | err = __mlx4_qp_reserve_range(dev, cnt: count, align, base: &base, flags); |
1809 | if (err) { |
1810 | mlx4_release_resource(dev, slave, res_type: RES_QP, count, port: 0); |
1811 | return err; |
1812 | } |
1813 | |
1814 | err = add_res_range(dev, slave, base, count, type: RES_QP, extra: 0); |
1815 | if (err) { |
1816 | mlx4_release_resource(dev, slave, res_type: RES_QP, count, port: 0); |
1817 | __mlx4_qp_release_range(dev, base_qpn: base, cnt: count); |
1818 | return err; |
1819 | } |
1820 | set_param_l(arg: out_param, val: base); |
1821 | break; |
1822 | case RES_OP_MAP_ICM: |
1823 | qpn = get_param_l(arg: &in_param) & 0x7fffff; |
1824 | if (valid_reserved(dev, slave, qpn)) { |
1825 | err = add_res_range(dev, slave, base: qpn, count: 1, type: RES_QP, extra: 0); |
1826 | if (err) |
1827 | return err; |
1828 | } |
1829 | |
1830 | err = qp_res_start_move_to(dev, slave, qpn, state: RES_QP_MAPPED, |
1831 | NULL, alloc: 1); |
1832 | if (err) |
1833 | return err; |
1834 | |
1835 | if (!fw_reserved(dev, qpn)) { |
1836 | err = __mlx4_qp_alloc_icm(dev, qpn); |
1837 | if (err) { |
1838 | res_abort_move(dev, slave, type: RES_QP, id: qpn); |
1839 | return err; |
1840 | } |
1841 | } |
1842 | |
1843 | res_end_move(dev, slave, type: RES_QP, id: qpn); |
1844 | break; |
1845 | |
1846 | default: |
1847 | err = -EINVAL; |
1848 | break; |
1849 | } |
1850 | return err; |
1851 | } |
1852 | |
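/*
 * The quota grant is taken for the full 1 << order MTT entries before the
 * range itself is carved out, so a failed allocation releases exactly what
 * was granted.
 */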
1853 | static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
1854 | u64 in_param, u64 *out_param) |
1855 | { |
1856 | int err = -EINVAL; |
1857 | int base; |
1858 | int order; |
1859 | |
1860 | if (op != RES_OP_RESERVE_AND_MAP) |
1861 | return err; |
1862 | |
	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
1881 | } |
1882 | |
1883 | return err; |
1884 | } |
1885 | |
1886 | static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
1887 | u64 in_param, u64 *out_param) |
1888 | { |
1889 | int err = -EINVAL; |
1890 | int index; |
1891 | int id; |
1892 | struct res_mpt *mpt; |
1893 | |
1894 | switch (op) { |
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
1931 | } |
1932 | return err; |
1933 | } |
1934 | |
1935 | static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
1936 | u64 in_param, u64 *out_param) |
1937 | { |
1938 | int cqn; |
1939 | int err; |
1940 | |
1941 | switch (op) { |
1942 | case RES_OP_RESERVE_AND_MAP: |
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
1961 | break; |
1962 | |
1963 | default: |
1964 | err = -EINVAL; |
1965 | } |
1966 | |
1967 | return err; |
1968 | } |
1969 | |
1970 | static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
1971 | u64 in_param, u64 *out_param) |
1972 | { |
1973 | int srqn; |
1974 | int err; |
1975 | |
1976 | switch (op) { |
1977 | case RES_OP_RESERVE_AND_MAP: |
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
1996 | break; |
1997 | |
1998 | default: |
1999 | err = -EINVAL; |
2000 | } |
2001 | |
2002 | return err; |
2003 | } |
2004 | |
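/*
 * Reverse lookup: given an smac_index already programmed for this slave,
 * recover the MAC registered under it on the given port.
 */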
2005 | static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port, |
2006 | u8 smac_index, u64 *mac) |
2007 | { |
2008 | struct mlx4_priv *priv = mlx4_priv(dev); |
2009 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
2010 | struct list_head *mac_list = |
2011 | &tracker->slave_list[slave].res_list[RES_MAC]; |
2012 | struct mac_res *res, *tmp; |
2013 | |
2014 | list_for_each_entry_safe(res, tmp, mac_list, list) { |
2015 | if (res->smac_index == smac_index && res->port == (u8) port) { |
2016 | *mac = res->mac; |
2017 | return 0; |
2018 | } |
2019 | } |
2020 | return -ENOENT; |
2021 | } |
2022 | |
2023 | static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index) |
2024 | { |
2025 | struct mlx4_priv *priv = mlx4_priv(dev); |
2026 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
2027 | struct list_head *mac_list = |
2028 | &tracker->slave_list[slave].res_list[RES_MAC]; |
2029 | struct mac_res *res, *tmp; |
2030 | |
2031 | list_for_each_entry_safe(res, tmp, mac_list, list) { |
2032 | if (res->mac == mac && res->port == (u8) port) { |
2033 | /* mac found. update ref count */ |
2034 | ++res->ref_count; |
2035 | return 0; |
2036 | } |
2037 | } |
2038 | |
	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
2052 | return 0; |
2053 | } |
2054 | |
2055 | static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, |
2056 | int port) |
2057 | { |
2058 | struct mlx4_priv *priv = mlx4_priv(dev); |
2059 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
2060 | struct list_head *mac_list = |
2061 | &tracker->slave_list[slave].res_list[RES_MAC]; |
2062 | struct mac_res *res, *tmp; |
2063 | |
2064 | list_for_each_entry_safe(res, tmp, mac_list, list) { |
2065 | if (res->mac == mac && res->port == (u8) port) { |
2066 | if (!--res->ref_count) { |
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
2070 | } |
2071 | break; |
2072 | } |
2073 | } |
2074 | } |
2075 | |
2076 | static void rem_slave_macs(struct mlx4_dev *dev, int slave) |
2077 | { |
2078 | struct mlx4_priv *priv = mlx4_priv(dev); |
2079 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
2080 | struct list_head *mac_list = |
2081 | &tracker->slave_list[slave].res_list[RES_MAC]; |
2082 | struct mac_res *res, *tmp; |
2083 | int i; |
2084 | |
2085 | list_for_each_entry_safe(res, tmp, mac_list, list) { |
		list_del(&res->list);
		/* drop each reference the slave took on this MAC */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
2092 | } |
2093 | } |
2094 | |
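/*
 * Register a MAC on behalf of a slave. The SMAC index returned by
 * __mlx4_register_mac() is passed back in the low dword of out_param, and
 * the (mac, port) pair is recorded in the tracker so rem_slave_macs() can
 * undo it when the slave is cleaned up.
 */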
2095 | static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2096 | u64 in_param, u64 *out_param, int in_port) |
2097 | { |
2098 | int err = -EINVAL; |
2099 | int port; |
2100 | u64 mac; |
2101 | u8 smac_index; |
2102 | |
2103 | if (op != RES_OP_RESERVE_AND_MAP) |
2104 | return err; |
2105 | |
	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(dev, slave, port);
	if (port < 0)
		return -EINVAL;
2112 | mac = in_param; |
2113 | |
2114 | err = __mlx4_register_mac(dev, port, mac); |
2115 | if (err >= 0) { |
2116 | smac_index = err; |
		set_param_l(out_param, err);
2118 | err = 0; |
2119 | } |
2120 | |
2121 | if (!err) { |
2122 | err = mac_add_to_slave(dev, slave, mac, port, smac_index); |
2123 | if (err) |
2124 | __mlx4_unregister_mac(dev, port, mac); |
2125 | } |
2126 | return err; |
2127 | } |
2128 | |
2129 | static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan, |
2130 | int port, int vlan_index) |
2131 | { |
2132 | struct mlx4_priv *priv = mlx4_priv(dev); |
2133 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
2134 | struct list_head *vlan_list = |
2135 | &tracker->slave_list[slave].res_list[RES_VLAN]; |
2136 | struct vlan_res *res, *tmp; |
2137 | |
2138 | list_for_each_entry_safe(res, tmp, vlan_list, list) { |
2139 | if (res->vlan == vlan && res->port == (u8) port) { |
2140 | /* vlan found. update ref count */ |
2141 | ++res->ref_count; |
2142 | return 0; |
2143 | } |
2144 | } |
2145 | |
	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
2159 | return 0; |
2160 | } |
2161 | |
2162 | |
2163 | static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan, |
2164 | int port) |
2165 | { |
2166 | struct mlx4_priv *priv = mlx4_priv(dev); |
2167 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
2168 | struct list_head *vlan_list = |
2169 | &tracker->slave_list[slave].res_list[RES_VLAN]; |
2170 | struct vlan_res *res, *tmp; |
2171 | |
2172 | list_for_each_entry_safe(res, tmp, vlan_list, list) { |
2173 | if (res->vlan == vlan && res->port == (u8) port) { |
2174 | if (!--res->ref_count) { |
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
2179 | } |
2180 | break; |
2181 | } |
2182 | } |
2183 | } |
2184 | |
2185 | static void rem_slave_vlans(struct mlx4_dev *dev, int slave) |
2186 | { |
2187 | struct mlx4_priv *priv = mlx4_priv(dev); |
2188 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
2189 | struct list_head *vlan_list = |
2190 | &tracker->slave_list[slave].res_list[RES_VLAN]; |
2191 | struct vlan_res *res, *tmp; |
2192 | int i; |
2193 | |
2194 | list_for_each_entry_safe(res, tmp, vlan_list, list) { |
		list_del(&res->list);
		/* drop each reference the slave took on this VLAN */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
2201 | } |
2202 | } |
2203 | |
2204 | static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2205 | u64 in_param, u64 *out_param, int in_port) |
2206 | { |
2207 | struct mlx4_priv *priv = mlx4_priv(dev); |
2208 | struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; |
2209 | int err; |
2210 | u16 vlan; |
2211 | int vlan_index; |
2212 | int port; |
2213 | |
	port = !in_port ? get_param_l(out_param) : in_port;
2215 | |
2216 | if (!port || op != RES_OP_RESERVE_AND_MAP) |
2217 | return -EINVAL; |
2218 | |
	port = mlx4_slave_convert_port(dev, slave, port);
	if (port < 0)
		return -EINVAL;
2224 | /* upstream kernels had NOP for reg/unreg vlan. Continue this. */ |
2225 | if (!in_port && port > 0 && port <= dev->caps.num_ports) { |
2226 | slave_state[slave].old_vlan_api = true; |
2227 | return 0; |
2228 | } |
2229 | |
2230 | vlan = (u16) in_param; |
2231 | |
	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
2235 | err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index); |
2236 | if (err) |
2237 | __mlx4_unregister_vlan(dev, port, vlan); |
2238 | } |
2239 | return err; |
2240 | } |
2241 | |
2242 | static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2243 | u64 in_param, u64 *out_param, int port) |
2244 | { |
2245 | u32 index; |
2246 | int err; |
2247 | |
2248 | if (op != RES_OP_RESERVE) |
2249 | return -EINVAL; |
2250 | |
	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
2267 | } |
2268 | |
2269 | return err; |
2270 | } |
2271 | |
2272 | static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2273 | u64 in_param, u64 *out_param) |
2274 | { |
2275 | u32 xrcdn; |
2276 | int err; |
2277 | |
2278 | if (op != RES_OP_RESERVE) |
2279 | return -EINVAL; |
2280 | |
	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);
2290 | |
2291 | return err; |
2292 | } |
2293 | |
2294 | int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, |
2295 | struct mlx4_vhcr *vhcr, |
2296 | struct mlx4_cmd_mailbox *inbox, |
2297 | struct mlx4_cmd_mailbox *outbox, |
2298 | struct mlx4_cmd_info *cmd) |
2299 | { |
2300 | int err; |
2301 | int alop = vhcr->op_modifier; |
2302 | |
2303 | switch (vhcr->in_modifier & 0xFF) { |
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param, 0);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;
2350 | |
2351 | default: |
2352 | err = -EINVAL; |
2353 | break; |
2354 | } |
2355 | |
2356 | return err; |
2357 | } |
2358 | |
2359 | static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2360 | u64 in_param) |
2361 | { |
2362 | int err; |
2363 | int count; |
2364 | int base; |
2365 | int qpn; |
2366 | |
2367 | switch (op) { |
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
2392 | default: |
2393 | err = -EINVAL; |
2394 | break; |
2395 | } |
2396 | return err; |
2397 | } |
2398 | |
2399 | static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2400 | u64 in_param, u64 *out_param) |
2401 | { |
2402 | int err = -EINVAL; |
2403 | int base; |
2404 | int order; |
2405 | |
2406 | if (op != RES_OP_RESERVE_AND_MAP) |
2407 | return err; |
2408 | |
	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
2415 | } |
2416 | return err; |
2417 | } |
2418 | |
2419 | static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2420 | u64 in_param) |
2421 | { |
2422 | int err = -EINVAL; |
2423 | int index; |
2424 | int id; |
2425 | struct res_mpt *mpt; |
2426 | |
2427 | switch (op) { |
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
2454 | default: |
2455 | err = -EINVAL; |
2456 | break; |
2457 | } |
2458 | return err; |
2459 | } |
2460 | |
2461 | static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2462 | u64 in_param, u64 *out_param) |
2463 | { |
2464 | int cqn; |
2465 | int err; |
2466 | |
2467 | switch (op) { |
2468 | case RES_OP_RESERVE_AND_MAP: |
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2475 | __mlx4_cq_free_icm(dev, cqn); |
2476 | break; |
2477 | |
2478 | default: |
2479 | err = -EINVAL; |
2480 | break; |
2481 | } |
2482 | |
2483 | return err; |
2484 | } |
2485 | |
2486 | static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2487 | u64 in_param, u64 *out_param) |
2488 | { |
2489 | int srqn; |
2490 | int err; |
2491 | |
2492 | switch (op) { |
2493 | case RES_OP_RESERVE_AND_MAP: |
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2500 | __mlx4_srq_free_icm(dev, srqn); |
2501 | break; |
2502 | |
2503 | default: |
2504 | err = -EINVAL; |
2505 | break; |
2506 | } |
2507 | |
2508 | return err; |
2509 | } |
2510 | |
2511 | static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2512 | u64 in_param, u64 *out_param, int in_port) |
2513 | { |
2514 | int port; |
2515 | int err = 0; |
2516 | |
2517 | switch (op) { |
2518 | case RES_OP_RESERVE_AND_MAP: |
		port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(dev, slave, port);
		if (port < 0)
			return -EINVAL;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
2527 | break; |
2528 | default: |
2529 | err = -EINVAL; |
2530 | break; |
2531 | } |
2532 | |
	return err;
}
2536 | |
2537 | static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2538 | u64 in_param, u64 *out_param, int port) |
2539 | { |
2540 | struct mlx4_priv *priv = mlx4_priv(dev); |
2541 | struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; |
2542 | int err = 0; |
2543 | |
	port = mlx4_slave_convert_port(dev, slave, port);
	if (port < 0)
		return -EINVAL;
2549 | switch (op) { |
2550 | case RES_OP_RESERVE_AND_MAP: |
2551 | if (slave_state[slave].old_vlan_api) |
2552 | return 0; |
2553 | if (!port) |
2554 | return -EINVAL; |
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
2557 | break; |
2558 | default: |
2559 | err = -EINVAL; |
2560 | break; |
2561 | } |
2562 | |
2563 | return err; |
2564 | } |
2565 | |
2566 | static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2567 | u64 in_param, u64 *out_param) |
2568 | { |
2569 | int index; |
2570 | int err; |
2571 | |
2572 | if (op != RES_OP_RESERVE) |
2573 | return -EINVAL; |
2574 | |
	index = get_param_l(&in_param);
2576 | if (index == MLX4_SINK_COUNTER_INDEX(dev)) |
2577 | return 0; |
2578 | |
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2580 | if (err) |
2581 | return err; |
2582 | |
	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2585 | |
2586 | return err; |
2587 | } |
2588 | |
2589 | static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, |
2590 | u64 in_param, u64 *out_param) |
2591 | { |
2592 | int xrcdn; |
2593 | int err; |
2594 | |
2595 | if (op != RES_OP_RESERVE) |
2596 | return -EINVAL; |
2597 | |
	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2600 | if (err) |
2601 | return err; |
2602 | |
2603 | __mlx4_xrcd_free(dev, xrcdn); |
2604 | |
2605 | return err; |
2606 | } |
2607 | |
2608 | int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, |
2609 | struct mlx4_vhcr *vhcr, |
2610 | struct mlx4_cmd_mailbox *inbox, |
2611 | struct mlx4_cmd_mailbox *outbox, |
2612 | struct mlx4_cmd_info *cmd) |
2613 | { |
2614 | int err = -EINVAL; |
2615 | int alop = vhcr->op_modifier; |
2616 | |
2617 | switch (vhcr->in_modifier & 0xFF) { |
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;
2664 | |
2665 | default: |
2666 | break; |
2667 | } |
2668 | return err; |
2669 | } |
2670 | |
2671 | /* ugly but other choices are uglier */ |
2672 | static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) |
2673 | { |
2674 | return (be32_to_cpu(mpt->flags) >> 9) & 1; |
2675 | } |
2676 | |
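/*
 * The helpers below pick individual fields out of a big-endian MPT entry
 * as supplied in the command mailbox.
 */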
2677 | static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt) |
2678 | { |
2679 | return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; |
2680 | } |
2681 | |
2682 | static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) |
2683 | { |
2684 | return be32_to_cpu(mpt->mtt_sz); |
2685 | } |
2686 | |
2687 | static u32 mr_get_pd(struct mlx4_mpt_entry *mpt) |
2688 | { |
2689 | return be32_to_cpu(mpt->pd_flags) & 0x00ffffff; |
2690 | } |
2691 | |
2692 | static int mr_is_fmr(struct mlx4_mpt_entry *mpt) |
2693 | { |
2694 | return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG; |
2695 | } |
2696 | |
2697 | static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt) |
2698 | { |
2699 | return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE; |
2700 | } |
2701 | |
2702 | static int mr_is_region(struct mlx4_mpt_entry *mpt) |
2703 | { |
2704 | return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION; |
2705 | } |
2706 | |
2707 | static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) |
2708 | { |
2709 | return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; |
2710 | } |
2711 | |
2712 | static int srq_get_mtt_addr(struct mlx4_srq_context *srqc) |
2713 | { |
2714 | return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; |
2715 | } |
2716 | |
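/*
 * Number of MTT pages spanned by a QP's buffer: SQ size plus RQ size (the
 * RQ contributes nothing when an SRQ, RSS or XRC is in use), shifted by
 * the page offset from params2 and rounded up to a power of two pages.
 */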
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
	int tot;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	tot = (total_mem + (page_offset << 6)) >> page_shift;
	total_pages = !tot ? 1 : roundup_pow_of_two(tot);

	return total_pages;
}
2743 | |
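/* Reject MTT ranges that fall outside the slave's tracked allocation. */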
2744 | static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, |
2745 | int size, struct res_mtt *mtt) |
2746 | { |
2747 | int res_start = mtt->com.res_id; |
2748 | int res_size = (1 << mtt->order); |
2749 | |
2750 | if (start < res_start || start + size > res_start + res_size) |
2751 | return -EPERM; |
2752 | return 0; |
2753 | } |
2754 | |
2755 | int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, |
2756 | struct mlx4_vhcr *vhcr, |
2757 | struct mlx4_cmd_mailbox *inbox, |
2758 | struct mlx4_cmd_mailbox *outbox, |
2759 | struct mlx4_cmd_info *cmd) |
2760 | { |
2761 | int err; |
2762 | int index = vhcr->in_modifier; |
2763 | struct res_mtt *mtt; |
2764 | struct res_mpt *mpt = NULL; |
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2766 | int phys; |
2767 | int id; |
2768 | u32 pd; |
2769 | int pd_slave; |
2770 | |
2771 | id = index & mpt_mask(dev); |
2772 | err = mr_res_start_move_to(dev, slave, index: id, state: RES_MPT_HW, mpt: &mpt); |
2773 | if (err) |
2774 | return err; |
2775 | |
2776 | /* Disable memory windows for VFs. */ |
	if (!mr_is_region(inbox->buf)) {
2778 | err = -EPERM; |
2779 | goto ex_abort; |
2780 | } |
2781 | |
2782 | /* Make sure that the PD bits related to the slave id are zeros. */ |
	pd = mr_get_pd(inbox->buf);
2784 | pd_slave = (pd >> 17) & 0x7f; |
2785 | if (pd_slave != 0 && --pd_slave != slave) { |
2786 | err = -EPERM; |
2787 | goto ex_abort; |
2788 | } |
2789 | |
	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
2798 | err = -EPERM; |
2799 | goto ex_abort; |
2800 | } |
2801 | } |
2802 | |
	phys = mr_phys_mpt(inbox->buf);
2804 | if (!phys) { |
2805 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
2806 | if (err) |
2807 | goto ex_abort; |
2808 | |
		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
2811 | if (err) |
2812 | goto ex_put; |
2813 | |
2814 | mpt->mtt = mtt; |
2815 | } |
2816 | |
2817 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
2818 | if (err) |
2819 | goto ex_put; |
2820 | |
2821 | if (!phys) { |
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2824 | } |
2825 | |
	res_end_move(dev, slave, RES_MPT, id);
2827 | return 0; |
2828 | |
2829 | ex_put: |
2830 | if (!phys) |
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);
2834 | |
2835 | return err; |
2836 | } |
2837 | |
2838 | int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, |
2839 | struct mlx4_vhcr *vhcr, |
2840 | struct mlx4_cmd_mailbox *inbox, |
2841 | struct mlx4_cmd_mailbox *outbox, |
2842 | struct mlx4_cmd_info *cmd) |
2843 | { |
2844 | int err; |
2845 | int index = vhcr->in_modifier; |
2846 | struct res_mpt *mpt; |
2847 | int id; |
2848 | |
2849 | id = index & mpt_mask(dev); |
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2851 | if (err) |
2852 | return err; |
2853 | |
2854 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
2855 | if (err) |
2856 | goto ex_abort; |
2857 | |
2858 | if (mpt->mtt) |
		atomic_dec(&mpt->mtt->ref_count);
2860 | |
	res_end_move(dev, slave, RES_MPT, id);
2862 | return 0; |
2863 | |
2864 | ex_abort: |
	res_abort_move(dev, slave, RES_MPT, id);
2866 | |
2867 | return err; |
2868 | } |
2869 | |
2870 | int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, |
2871 | struct mlx4_vhcr *vhcr, |
2872 | struct mlx4_cmd_mailbox *inbox, |
2873 | struct mlx4_cmd_mailbox *outbox, |
2874 | struct mlx4_cmd_info *cmd) |
2875 | { |
2876 | int err; |
2877 | int index = vhcr->in_modifier; |
2878 | struct res_mpt *mpt; |
2879 | int id; |
2880 | |
2881 | id = index & mpt_mask(dev); |
2882 | err = get_res(dev, slave, id, RES_MPT, &mpt); |
2883 | if (err) |
2884 | return err; |
2885 | |
2886 | if (mpt->com.from_state == RES_MPT_MAPPED) { |
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
					&mlx4_priv(dev)->mr_table.dmpt_table,
					mpt->key, NULL);
2898 | |
2899 | if (NULL == mpt_entry || NULL == outbox->buf) { |
2900 | err = -EINVAL; |
2901 | goto out; |
2902 | } |
2903 | |
2904 | memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry)); |
2905 | |
2906 | err = 0; |
2907 | } else if (mpt->com.from_state == RES_MPT_HW) { |
2908 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
2909 | } else { |
2910 | err = -EBUSY; |
2911 | goto out; |
2912 | } |
2913 | |
2914 | |
2915 | out: |
	put_res(dev, slave, id, RES_MPT);
2917 | return err; |
2918 | } |
2919 | |
2920 | static int qp_get_rcqn(struct mlx4_qp_context *qpc) |
2921 | { |
2922 | return be32_to_cpu(qpc->cqn_recv) & 0xffffff; |
2923 | } |
2924 | |
2925 | static int qp_get_scqn(struct mlx4_qp_context *qpc) |
2926 | { |
2927 | return be32_to_cpu(qpc->cqn_send) & 0xffffff; |
2928 | } |
2929 | |
2930 | static u32 qp_get_srqn(struct mlx4_qp_context *qpc) |
2931 | { |
2932 | return be32_to_cpu(qpc->srqn) & 0x1ffffff; |
2933 | } |
2934 | |
2935 | static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr, |
2936 | struct mlx4_qp_context *context) |
2937 | { |
2938 | u32 qpn = vhcr->in_modifier & 0xffffff; |
2939 | u32 qkey = 0; |
2940 | |
	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2942 | return; |
2943 | |
2944 | /* adjust qkey in qp context */ |
2945 | context->qkey = cpu_to_be32(qkey); |
2946 | } |
2947 | |
2948 | static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, |
2949 | struct mlx4_qp_context *qpc, |
2950 | struct mlx4_cmd_mailbox *inbox); |
2951 | |
2952 | int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, |
2953 | struct mlx4_vhcr *vhcr, |
2954 | struct mlx4_cmd_mailbox *inbox, |
2955 | struct mlx4_cmd_mailbox *outbox, |
2956 | struct mlx4_cmd_info *cmd) |
2957 | { |
2958 | int err; |
2959 | int qpn = vhcr->in_modifier & 0x7fffff; |
2960 | struct res_mtt *mtt; |
2961 | struct res_qp *qp; |
2962 | struct mlx4_qp_context *qpc = inbox->buf + 8; |
2963 | int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; |
2964 | int mtt_size = qp_get_mtt_size(qpc); |
2965 | struct res_cq *rcq; |
2966 | struct res_cq *scq; |
2967 | int rcqn = qp_get_rcqn(qpc); |
2968 | int scqn = qp_get_scqn(qpc); |
2969 | u32 srqn = qp_get_srqn(qpc) & 0xffffff; |
2970 | int use_srq = (qp_get_srqn(qpc) >> 24) & 1; |
2971 | struct res_srq *srq; |
2972 | int local_qpn = vhcr->in_modifier & 0xffffff; |
2973 | |
2974 | err = adjust_qp_sched_queue(dev, slave, qpc, inbox); |
2975 | if (err) |
2976 | return err; |
2977 | |
	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2979 | if (err) |
2980 | return err; |
2981 | qp->local_qpn = local_qpn; |
2982 | qp->sched_queue = 0; |
2983 | qp->param3 = 0; |
2984 | qp->vlan_control = 0; |
2985 | qp->fvl_rx = 0; |
2986 | qp->pri_path_fl = 0; |
2987 | qp->vlan_index = 0; |
2988 | qp->feup = 0; |
2989 | qp->qpc_flags = be32_to_cpu(qpc->flags); |
2990 | |
2991 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
2992 | if (err) |
2993 | goto ex_abort; |
2994 | |
	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2996 | if (err) |
2997 | goto ex_put_mtt; |
2998 | |
2999 | err = get_res(dev, slave, rcqn, RES_CQ, &rcq); |
3000 | if (err) |
3001 | goto ex_put_mtt; |
3002 | |
3003 | if (scqn != rcqn) { |
3004 | err = get_res(dev, slave, scqn, RES_CQ, &scq); |
3005 | if (err) |
3006 | goto ex_put_rcq; |
3007 | } else |
3008 | scq = rcq; |
3009 | |
3010 | if (use_srq) { |
3011 | err = get_res(dev, slave, srqn, RES_SRQ, &srq); |
3012 | if (err) |
3013 | goto ex_put_scq; |
3014 | } |
3015 | |
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3017 | update_pkey_index(dev, slave, inbox); |
3018 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3019 | if (err) |
3020 | goto ex_put_srq; |
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;
3027 | |
3028 | if (scqn != rcqn) |
		put_res(dev, slave, scqn, RES_CQ);
3030 | |
3031 | if (use_srq) { |
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
3035 | } |
3036 | |
3037 | /* Save param3 for dynamic changes from VST back to VGT */ |
3038 | qp->param3 = qpc->param3; |
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);
3042 | |
3043 | return 0; |
3044 | |
ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);
3057 | |
3058 | return err; |
3059 | } |
3060 | |
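/*
 * EQ entries are 32 bytes, so an EQ of 1 << log_eq_size entries spans
 * 1 << (log_eq_size + 5) bytes; at least one page is always reported.
 */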
3061 | static int eq_get_mtt_addr(struct mlx4_eq_context *eqc) |
3062 | { |
3063 | return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; |
3064 | } |
3065 | |
3066 | static int eq_get_mtt_size(struct mlx4_eq_context *eqc) |
3067 | { |
3068 | int log_eq_size = eqc->log_eq_size & 0x1f; |
3069 | int page_shift = (eqc->log_page_size & 0x3f) + 12; |
3070 | |
3071 | if (log_eq_size + 5 < page_shift) |
3072 | return 1; |
3073 | |
3074 | return 1 << (log_eq_size + 5 - page_shift); |
3075 | } |
3076 | |
3077 | static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) |
3078 | { |
3079 | return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; |
3080 | } |
3081 | |
3082 | static int cq_get_mtt_size(struct mlx4_cq_context *cqc) |
3083 | { |
3084 | int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; |
3085 | int page_shift = (cqc->log_page_size & 0x3f) + 12; |
3086 | |
3087 | if (log_cq_size + 5 < page_shift) |
3088 | return 1; |
3089 | |
3090 | return 1 << (log_cq_size + 5 - page_shift); |
3091 | } |
3092 | |
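/*
 * EQ numbers are only unique per function, so EQs are tracked under the
 * composite id (slave << 10) | eqn (e.g. slave 2, eqn 5 -> 0x805).
 */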
3093 | int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, |
3094 | struct mlx4_vhcr *vhcr, |
3095 | struct mlx4_cmd_mailbox *inbox, |
3096 | struct mlx4_cmd_mailbox *outbox, |
3097 | struct mlx4_cmd_info *cmd) |
3098 | { |
3099 | int err; |
3100 | int eqn = vhcr->in_modifier; |
3101 | int res_id = (slave << 10) | eqn; |
3102 | struct mlx4_eq_context *eqc = inbox->buf; |
3103 | int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; |
3104 | int mtt_size = eq_get_mtt_size(eqc); |
3105 | struct res_eq *eq; |
3106 | struct res_mtt *mtt; |
3107 | |
	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3109 | if (err) |
3110 | return err; |
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3112 | if (err) |
3113 | goto out_add; |
3114 | |
3115 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
3116 | if (err) |
3117 | goto out_move; |
3118 | |
	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3120 | if (err) |
3121 | goto out_put; |
3122 | |
3123 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3124 | if (err) |
3125 | goto out_put; |
3126 | |
	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
3131 | return 0; |
3132 | |
out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3139 | return err; |
3140 | } |
3141 | |
3142 | int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, |
3143 | struct mlx4_vhcr *vhcr, |
3144 | struct mlx4_cmd_mailbox *inbox, |
3145 | struct mlx4_cmd_mailbox *outbox, |
3146 | struct mlx4_cmd_info *cmd) |
3147 | { |
3148 | int err; |
3149 | u8 get = vhcr->op_modifier; |
3150 | |
3151 | if (get != 1) |
3152 | return -EPERM; |
3153 | |
3154 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3155 | |
3156 | return err; |
3157 | } |
3158 | |
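/*
 * WRITE_MTT does not name an MTT resource explicitly, so scan the slave's
 * MTT list for a range containing [start, start + len) and mark it busy
 * while the write is in flight.
 */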
3159 | static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, |
3160 | int len, struct res_mtt **res) |
3161 | { |
3162 | struct mlx4_priv *priv = mlx4_priv(dev); |
3163 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
3164 | struct res_mtt *mtt; |
3165 | int err = -EINVAL; |
3166 | |
	spin_lock_irq(mlx4_tlock(dev));
3168 | list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], |
3169 | com.list) { |
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
3171 | *res = mtt; |
3172 | mtt->com.from_state = mtt->com.state; |
3173 | mtt->com.state = RES_MTT_BUSY; |
3174 | err = 0; |
3175 | break; |
3176 | } |
3177 | } |
	spin_unlock_irq(mlx4_tlock(dev));
3179 | |
3180 | return err; |
3181 | } |
3182 | |
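/*
 * Sanity-check slave-supplied QP context fields on state transitions:
 * MGID indexes must fall within the slave's GID quota, VFs may not set a
 * QP rate limit, and only SMI-enabled VFs may bring up MLX proxy QPs.
 */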
3183 | static int verify_qp_parameters(struct mlx4_dev *dev, |
3184 | struct mlx4_vhcr *vhcr, |
3185 | struct mlx4_cmd_mailbox *inbox, |
3186 | enum qp_transition transition, u8 slave) |
3187 | { |
3188 | u32 qp_type; |
3189 | u32 qpn; |
3190 | struct mlx4_qp_context *qp_ctx; |
3191 | enum mlx4_qp_optpar optpar; |
3192 | int port; |
3193 | int num_gids; |
3194 | |
3195 | qp_ctx = inbox->buf + 8; |
3196 | qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; |
3197 | optpar = be32_to_cpu(*(__be32 *) inbox->buf); |
3198 | |
3199 | if (slave != mlx4_master_func_num(dev)) { |
3200 | qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP); |
3201 | /* setting QP rate-limit is disallowed for VFs */ |
3202 | if (qp_ctx->rate_limit_params) |
3203 | return -EPERM; |
3204 | } |
3205 | |
3206 | switch (qp_type) { |
3207 | case MLX4_QP_ST_RC: |
3208 | case MLX4_QP_ST_XRC: |
3209 | case MLX4_QP_ST_UC: |
3210 | switch (transition) { |
3211 | case QP_TRANS_INIT2RTR: |
3212 | case QP_TRANS_RTR2RTS: |
3213 | case QP_TRANS_RTS2RTS: |
3214 | case QP_TRANS_SQD2SQD: |
3215 | case QP_TRANS_SQD2RTS: |
3216 | if (slave != mlx4_master_func_num(dev)) { |
3217 | if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { |
3218 | port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; |
3219 | if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) |
3220 | num_gids = mlx4_get_slave_num_gids(dev, slave, port); |
3221 | else |
3222 | num_gids = 1; |
3223 | if (qp_ctx->pri_path.mgid_index >= num_gids) |
3224 | return -EINVAL; |
3225 | } |
3226 | if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { |
3227 | port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; |
3228 | if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) |
3229 | num_gids = mlx4_get_slave_num_gids(dev, slave, port); |
3230 | else |
3231 | num_gids = 1; |
3232 | if (qp_ctx->alt_path.mgid_index >= num_gids) |
3233 | return -EINVAL; |
3234 | } |
3235 | } |
3236 | break; |
3237 | default: |
3238 | break; |
3239 | } |
3240 | break; |
3241 | |
3242 | case MLX4_QP_ST_MLX: |
3243 | qpn = vhcr->in_modifier & 0x7fffff; |
3244 | port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; |
3245 | if (transition == QP_TRANS_INIT2RTR && |
3246 | slave != mlx4_master_func_num(dev) && |
3247 | mlx4_is_qp_reserved(dev, qpn) && |
3248 | !mlx4_vf_smi_enabled(dev, slave, port)) { |
3249 | /* only enabled VFs may create MLX proxy QPs */ |
			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
				 __func__, slave, port);
3252 | return -EPERM; |
3253 | } |
3254 | break; |
3255 | |
3256 | default: |
3257 | break; |
3258 | } |
3259 | |
3260 | return 0; |
3261 | } |
3262 | |
3263 | int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, |
3264 | struct mlx4_vhcr *vhcr, |
3265 | struct mlx4_cmd_mailbox *inbox, |
3266 | struct mlx4_cmd_mailbox *outbox, |
3267 | struct mlx4_cmd_info *cmd) |
3268 | { |
3269 | struct mlx4_mtt mtt; |
3270 | __be64 *page_list = inbox->buf; |
3271 | u64 *pg_list = (u64 *)page_list; |
3272 | int i; |
3273 | struct res_mtt *rmtt = NULL; |
3274 | int start = be64_to_cpu(page_list[0]); |
3275 | int npages = vhcr->in_modifier; |
3276 | int err; |
3277 | |
	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3279 | if (err) |
3280 | return err; |
3281 | |
3282 | /* Call the SW implementation of write_mtt: |
3283 | * - Prepare a dummy mtt struct |
3284 | * - Translate inbox contents to simple addresses in host endianness */ |
3285 | mtt.offset = 0; /* TBD this is broken but I don't handle it since |
3286 | we don't really use it */ |
3287 | mtt.order = 0; |
3288 | mtt.page_shift = 0; |
3289 | for (i = 0; i < npages; ++i) |
3290 | pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); |
3291 | |
	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));
3294 | |
3295 | if (rmtt) |
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3297 | |
3298 | return err; |
3299 | } |
3300 | |
3301 | int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, |
3302 | struct mlx4_vhcr *vhcr, |
3303 | struct mlx4_cmd_mailbox *inbox, |
3304 | struct mlx4_cmd_mailbox *outbox, |
3305 | struct mlx4_cmd_info *cmd) |
3306 | { |
3307 | int eqn = vhcr->in_modifier; |
3308 | int res_id = eqn | (slave << 10); |
3309 | struct res_eq *eq; |
3310 | int err; |
3311 | |
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3313 | if (err) |
3314 | return err; |
3315 | |
3316 | err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); |
3317 | if (err) |
3318 | goto ex_abort; |
3319 | |
3320 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3321 | if (err) |
3322 | goto ex_put; |
3323 | |
	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3328 | |
3329 | return 0; |
3330 | |
ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);
3335 | |
3336 | return err; |
3337 | } |
3338 | |
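/*
 * Inject an event on a slave's event queue. The EQE is copied into a
 * mailbox and posted via MLX4_CMD_GEN_EQE, serialized per slave so that
 * command-completion tokens stay ordered.
 */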
3339 | int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) |
3340 | { |
3341 | struct mlx4_priv *priv = mlx4_priv(dev); |
3342 | struct mlx4_slave_event_eq_info *event_eq; |
3343 | struct mlx4_cmd_mailbox *mailbox; |
3344 | u32 in_modifier = 0; |
3345 | int err; |
3346 | int res_id; |
3347 | struct res_eq *req; |
3348 | |
3349 | if (!priv->mfunc.master.slave_state) |
3350 | return -EINVAL; |
3351 | |
3352 | /* check for slave valid, slave not PF, and slave active */ |
3353 | if (slave < 0 || slave > dev->persist->num_vfs || |
3354 | slave == dev->caps.function || |
3355 | !priv->mfunc.master.slave_state[slave].active) |
3356 | return 0; |
3357 | |
3358 | event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; |
3359 | |
3360 | /* Create the event only if the slave is registered */ |
3361 | if (event_eq->eqn < 0) |
3362 | return 0; |
3363 | |
3364 | mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); |
3365 | res_id = (slave << 10) | event_eq->eqn; |
3366 | err = get_res(dev, slave, res_id, RES_EQ, &req); |
3367 | if (err) |
3368 | goto unlock; |
3369 | |
3370 | if (req->com.from_state != RES_EQ_HW) { |
3371 | err = -EINVAL; |
3372 | goto put; |
3373 | } |
3374 | |
3375 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
3378 | goto put; |
3379 | } |
3380 | |
3381 | if (eqe->type == MLX4_EVENT_TYPE_CMD) { |
3382 | ++event_eq->token; |
3383 | eqe->event.cmd.token = cpu_to_be16(event_eq->token); |
3384 | } |
3385 | |
3386 | memcpy(mailbox->buf, (u8 *) eqe, 28); |
3387 | |
3388 | in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16); |
3389 | |
	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);
3393 | |
	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3396 | mlx4_free_cmd_mailbox(dev, mailbox); |
3397 | return err; |
3398 | |
3399 | put: |
	put_res(dev, slave, res_id, RES_EQ);
3401 | |
3402 | unlock: |
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3404 | return err; |
3405 | } |
3406 | |
3407 | int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, |
3408 | struct mlx4_vhcr *vhcr, |
3409 | struct mlx4_cmd_mailbox *inbox, |
3410 | struct mlx4_cmd_mailbox *outbox, |
3411 | struct mlx4_cmd_info *cmd) |
3412 | { |
3413 | int eqn = vhcr->in_modifier; |
3414 | int res_id = eqn | (slave << 10); |
3415 | struct res_eq *eq; |
3416 | int err; |
3417 | |
3418 | err = get_res(dev, slave, res_id, RES_EQ, &eq); |
3419 | if (err) |
3420 | return err; |
3421 | |
3422 | if (eq->com.from_state != RES_EQ_HW) { |
3423 | err = -EINVAL; |
3424 | goto ex_put; |
3425 | } |
3426 | |
3427 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3428 | |
3429 | ex_put: |
	put_res(dev, slave, res_id, RES_EQ);
3431 | return err; |
3432 | } |
3433 | |
3434 | int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, |
3435 | struct mlx4_vhcr *vhcr, |
3436 | struct mlx4_cmd_mailbox *inbox, |
3437 | struct mlx4_cmd_mailbox *outbox, |
3438 | struct mlx4_cmd_info *cmd) |
3439 | { |
3440 | int err; |
3441 | int cqn = vhcr->in_modifier; |
3442 | struct mlx4_cq_context *cqc = inbox->buf; |
3443 | int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; |
3444 | struct res_cq *cq = NULL; |
3445 | struct res_mtt *mtt; |
3446 | |
	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3448 | if (err) |
3449 | return err; |
3450 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
3451 | if (err) |
3452 | goto out_move; |
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3454 | if (err) |
3455 | goto out_put; |
3456 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3457 | if (err) |
3458 | goto out_put; |
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
3463 | return 0; |
3464 | |
out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
3469 | return err; |
3470 | } |
3471 | |
3472 | int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, |
3473 | struct mlx4_vhcr *vhcr, |
3474 | struct mlx4_cmd_mailbox *inbox, |
3475 | struct mlx4_cmd_mailbox *outbox, |
3476 | struct mlx4_cmd_info *cmd) |
3477 | { |
3478 | int err; |
3479 | int cqn = vhcr->in_modifier; |
3480 | struct res_cq *cq = NULL; |
3481 | |
	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3483 | if (err) |
3484 | return err; |
3485 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3486 | if (err) |
3487 | goto out_move; |
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
3490 | return 0; |
3491 | |
3492 | out_move: |
	res_abort_move(dev, slave, RES_CQ, cqn);
3494 | return err; |
3495 | } |
3496 | |
3497 | int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, |
3498 | struct mlx4_vhcr *vhcr, |
3499 | struct mlx4_cmd_mailbox *inbox, |
3500 | struct mlx4_cmd_mailbox *outbox, |
3501 | struct mlx4_cmd_info *cmd) |
3502 | { |
3503 | int cqn = vhcr->in_modifier; |
3504 | struct res_cq *cq; |
3505 | int err; |
3506 | |
3507 | err = get_res(dev, slave, cqn, RES_CQ, &cq); |
3508 | if (err) |
3509 | return err; |
3510 | |
3511 | if (cq->com.from_state != RES_CQ_HW) |
3512 | goto ex_put; |
3513 | |
3514 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3515 | ex_put: |
	put_res(dev, slave, cqn, RES_CQ);
3517 | |
3518 | return err; |
3519 | } |
3520 | |
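/*
 * MODIFY_CQ with op_modifier 0 is a CQ resize: validate the new MTT range,
 * then move the tracked MTT reference from the old buffer to the new one.
 */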
3521 | static int handle_resize(struct mlx4_dev *dev, int slave, |
3522 | struct mlx4_vhcr *vhcr, |
3523 | struct mlx4_cmd_mailbox *inbox, |
3524 | struct mlx4_cmd_mailbox *outbox, |
3525 | struct mlx4_cmd_info *cmd, |
3526 | struct res_cq *cq) |
3527 | { |
3528 | int err; |
3529 | struct res_mtt *orig_mtt; |
3530 | struct res_mtt *mtt; |
3531 | struct mlx4_cq_context *cqc = inbox->buf; |
3532 | int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; |
3533 | |
3534 | err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); |
3535 | if (err) |
3536 | return err; |
3537 | |
3538 | if (orig_mtt != cq->mtt) { |
3539 | err = -EINVAL; |
3540 | goto ex_put; |
3541 | } |
3542 | |
3543 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
3544 | if (err) |
3545 | goto ex_put; |
3546 | |
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3548 | if (err) |
3549 | goto ex_put1; |
3550 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3551 | if (err) |
3552 | goto ex_put1; |
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3558 | return 0; |
3559 | |
ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
3568 | |
3569 | int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, |
3570 | struct mlx4_vhcr *vhcr, |
3571 | struct mlx4_cmd_mailbox *inbox, |
3572 | struct mlx4_cmd_mailbox *outbox, |
3573 | struct mlx4_cmd_info *cmd) |
3574 | { |
3575 | int cqn = vhcr->in_modifier; |
3576 | struct res_cq *cq; |
3577 | int err; |
3578 | |
3579 | err = get_res(dev, slave, cqn, RES_CQ, &cq); |
3580 | if (err) |
3581 | return err; |
3582 | |
3583 | if (cq->com.from_state != RES_CQ_HW) |
3584 | goto ex_put; |
3585 | |
3586 | if (vhcr->op_modifier == 0) { |
3587 | err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); |
3588 | goto ex_put; |
3589 | } |
3590 | |
3591 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3592 | ex_put: |
	put_res(dev, slave, cqn, RES_CQ);
3594 | |
3595 | return err; |
3596 | } |
3597 | |
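/* Number of MTT entries covered by the SRQ buffer: the buffer occupies
 * 2^(log_srq_size + log_rq_stride + 4) bytes, split into pages of
 * 2^page_shift bytes; a buffer smaller than one page still uses one entry.
 */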
3598 | static int srq_get_mtt_size(struct mlx4_srq_context *srqc) |
3599 | { |
3600 | int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; |
3601 | int log_rq_stride = srqc->logstride & 7; |
3602 | int page_shift = (srqc->log_page_size & 0x3f) + 12; |
3603 | |
3604 | if (log_srq_size + log_rq_stride + 4 < page_shift) |
3605 | return 1; |
3606 | |
3607 | return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); |
3608 | } |
3609 | |
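/* Sanity-check that the SRQ number embedded in the context matches the
 * command's input modifier, move the SRQ to HW ownership, and pin the
 * MTT range it references before forwarding the command to firmware.
 */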
3610 | int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, |
3611 | struct mlx4_vhcr *vhcr, |
3612 | struct mlx4_cmd_mailbox *inbox, |
3613 | struct mlx4_cmd_mailbox *outbox, |
3614 | struct mlx4_cmd_info *cmd) |
3615 | { |
3616 | int err; |
3617 | int srqn = vhcr->in_modifier; |
3618 | struct res_mtt *mtt; |
3619 | struct res_srq *srq = NULL; |
3620 | struct mlx4_srq_context *srqc = inbox->buf; |
3621 | int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; |
3622 | |
3623 | if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) |
3624 | return -EINVAL; |
3625 | |
	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3627 | if (err) |
3628 | return err; |
3629 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
3630 | if (err) |
3631 | goto ex_abort; |
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3633 | mtt); |
3634 | if (err) |
3635 | goto ex_put_mtt; |
3636 | |
3637 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3638 | if (err) |
3639 | goto ex_put_mtt; |
3640 | |
	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
3645 | return 0; |
3646 | |
3647 | ex_put_mtt: |
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);
3651 | |
3652 | return err; |
3653 | } |
3654 | |
3655 | int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, |
3656 | struct mlx4_vhcr *vhcr, |
3657 | struct mlx4_cmd_mailbox *inbox, |
3658 | struct mlx4_cmd_mailbox *outbox, |
3659 | struct mlx4_cmd_info *cmd) |
3660 | { |
3661 | int err; |
3662 | int srqn = vhcr->in_modifier; |
3663 | struct res_srq *srq = NULL; |
3664 | |
	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3666 | if (err) |
3667 | return err; |
3668 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3669 | if (err) |
3670 | goto ex_abort; |
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);
3675 | |
3676 | return 0; |
3677 | |
3678 | ex_abort: |
	res_abort_move(dev, slave, RES_SRQ, srqn);
3680 | |
3681 | return err; |
3682 | } |
3683 | |
3684 | int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, |
3685 | struct mlx4_vhcr *vhcr, |
3686 | struct mlx4_cmd_mailbox *inbox, |
3687 | struct mlx4_cmd_mailbox *outbox, |
3688 | struct mlx4_cmd_info *cmd) |
3689 | { |
3690 | int err; |
3691 | int srqn = vhcr->in_modifier; |
3692 | struct res_srq *srq; |
3693 | |
3694 | err = get_res(dev, slave, srqn, RES_SRQ, &srq); |
3695 | if (err) |
3696 | return err; |
3697 | if (srq->com.from_state != RES_SRQ_HW) { |
3698 | err = -EBUSY; |
3699 | goto out; |
3700 | } |
3701 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3702 | out: |
	put_res(dev, slave, srqn, RES_SRQ);
3704 | return err; |
3705 | } |
3706 | |
3707 | int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, |
3708 | struct mlx4_vhcr *vhcr, |
3709 | struct mlx4_cmd_mailbox *inbox, |
3710 | struct mlx4_cmd_mailbox *outbox, |
3711 | struct mlx4_cmd_info *cmd) |
3712 | { |
3713 | int err; |
3714 | int srqn = vhcr->in_modifier; |
3715 | struct res_srq *srq; |
3716 | |
3717 | err = get_res(dev, slave, srqn, RES_SRQ, &srq); |
3718 | if (err) |
3719 | return err; |
3720 | |
3721 | if (srq->com.from_state != RES_SRQ_HW) { |
3722 | err = -EBUSY; |
3723 | goto out; |
3724 | } |
3725 | |
3726 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3727 | out: |
	put_res(dev, slave, srqn, RES_SRQ);
3729 | return err; |
3730 | } |
3731 | |
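/* Generic wrapper for QP commands that are only valid while the QP is in
 * HW ownership: take the QP busy, forward the command, release the QP.
 */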
3732 | int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, |
3733 | struct mlx4_vhcr *vhcr, |
3734 | struct mlx4_cmd_mailbox *inbox, |
3735 | struct mlx4_cmd_mailbox *outbox, |
3736 | struct mlx4_cmd_info *cmd) |
3737 | { |
3738 | int err; |
3739 | int qpn = vhcr->in_modifier & 0x7fffff; |
3740 | struct res_qp *qp; |
3741 | |
3742 | err = get_res(dev, slave, qpn, RES_QP, &qp); |
3743 | if (err) |
3744 | return err; |
3745 | if (qp->com.from_state != RES_QP_HW) { |
3746 | err = -EBUSY; |
3747 | goto out; |
3748 | } |
3749 | |
3750 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3751 | out: |
	put_res(dev, slave, qpn, RES_QP);
3753 | return err; |
3754 | } |
3755 | |
3756 | int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, |
3757 | struct mlx4_vhcr *vhcr, |
3758 | struct mlx4_cmd_mailbox *inbox, |
3759 | struct mlx4_cmd_mailbox *outbox, |
3760 | struct mlx4_cmd_info *cmd) |
3761 | { |
3762 | struct mlx4_qp_context *context = inbox->buf + 8; |
3763 | adjust_proxy_tun_qkey(dev, vhcr, context); |
3764 | update_pkey_index(dev, slave, inbox); |
3765 | return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3766 | } |
3767 | |
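/* Bit 6 of sched_queue selects the port.  Rewrite it from the slave's
 * virtual port numbering to the physical port: for the primary path when
 * the address path or sched_queue is being modified (or on Ethernet), and
 * for the alternate path when MLX4_QP_OPTPAR_ALT_ADDR_PATH is set.
 */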
3768 | static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, |
3769 | struct mlx4_qp_context *qpc, |
3770 | struct mlx4_cmd_mailbox *inbox) |
3771 | { |
3772 | enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); |
3773 | u8 pri_sched_queue; |
	int port = mlx4_slave_convert_port(
		dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3776 | |
3777 | if (port < 0) |
3778 | return -EINVAL; |
3779 | |
3780 | pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | |
3781 | ((port & 1) << 6); |
3782 | |
3783 | if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) || |
	    qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3785 | qpc->pri_path.sched_queue = pri_sched_queue; |
3786 | } |
3787 | |
3788 | if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { |
		port = mlx4_slave_convert_port(
			dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
			+ 1) - 1;
3792 | if (port < 0) |
3793 | return -EINVAL; |
3794 | qpc->alt_path.sched_queue = |
3795 | (qpc->alt_path.sched_queue & ~(1 << 6)) | |
3796 | (port & 1) << 6; |
3797 | } |
3798 | return 0; |
3799 | } |
3800 | |
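/* For Ethernet QPs (other than the MLX service type), make sure the smac
 * index programmed in the QP context resolves to a MAC that is actually
 * registered to this slave on that port.
 */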
3801 | static int roce_verify_mac(struct mlx4_dev *dev, int slave, |
3802 | struct mlx4_qp_context *qpc, |
3803 | struct mlx4_cmd_mailbox *inbox) |
3804 | { |
3805 | u64 mac; |
3806 | int port; |
3807 | u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; |
3808 | u8 sched = *(u8 *)(inbox->buf + 64); |
3809 | u8 smac_ix; |
3810 | |
3811 | port = (sched >> 6 & 1) + 1; |
3812 | if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { |
3813 | smac_ix = qpc->pri_path.grh_mylmc & 0x7f; |
		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3815 | return -ENOENT; |
3816 | } |
3817 | return 0; |
3818 | } |
3819 | |
3820 | int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, |
3821 | struct mlx4_vhcr *vhcr, |
3822 | struct mlx4_cmd_mailbox *inbox, |
3823 | struct mlx4_cmd_mailbox *outbox, |
3824 | struct mlx4_cmd_info *cmd) |
3825 | { |
3826 | int err; |
3827 | struct mlx4_qp_context *qpc = inbox->buf + 8; |
3828 | int qpn = vhcr->in_modifier & 0x7fffff; |
3829 | struct res_qp *qp; |
3830 | u8 orig_sched_queue; |
3831 | u8 orig_vlan_control = qpc->pri_path.vlan_control; |
3832 | u8 orig_fvl_rx = qpc->pri_path.fvl_rx; |
3833 | u8 orig_pri_path_fl = qpc->pri_path.fl; |
3834 | u8 orig_vlan_index = qpc->pri_path.vlan_index; |
3835 | u8 orig_feup = qpc->pri_path.feup; |
3836 | |
3837 | err = adjust_qp_sched_queue(dev, slave, qpc, inbox); |
3838 | if (err) |
3839 | return err; |
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3841 | if (err) |
3842 | return err; |
3843 | |
3844 | if (roce_verify_mac(dev, slave, qpc, inbox)) |
3845 | return -EINVAL; |
3846 | |
3847 | update_pkey_index(dev, slave, inbox); |
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3850 | orig_sched_queue = qpc->pri_path.sched_queue; |
3851 | |
3852 | err = get_res(dev, slave, qpn, RES_QP, &qp); |
3853 | if (err) |
3854 | return err; |
3855 | if (qp->com.from_state != RES_QP_HW) { |
3856 | err = -EBUSY; |
3857 | goto out; |
3858 | } |
3859 | |
3860 | err = update_vport_qp_param(dev, inbox, slave, qpn); |
3861 | if (err) |
3862 | goto out; |
3863 | |
3864 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3865 | out: |
3866 | /* if no error, save sched queue value passed in by VF. This is |
3867 | * essentially the QOS value provided by the VF. This will be useful |
3868 | * if we allow dynamic changes from VST back to VGT |
3869 | */ |
3870 | if (!err) { |
3871 | qp->sched_queue = orig_sched_queue; |
3872 | qp->vlan_control = orig_vlan_control; |
3873 | qp->fvl_rx = orig_fvl_rx; |
3874 | qp->pri_path_fl = orig_pri_path_fl; |
3875 | qp->vlan_index = orig_vlan_index; |
3876 | qp->feup = orig_feup; |
3877 | } |
	put_res(dev, slave, qpn, RES_QP);
3879 | return err; |
3880 | } |
3881 | |
3882 | int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, |
3883 | struct mlx4_vhcr *vhcr, |
3884 | struct mlx4_cmd_mailbox *inbox, |
3885 | struct mlx4_cmd_mailbox *outbox, |
3886 | struct mlx4_cmd_info *cmd) |
3887 | { |
3888 | int err; |
3889 | struct mlx4_qp_context *context = inbox->buf + 8; |
3890 | |
	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3892 | if (err) |
3893 | return err; |
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3895 | if (err) |
3896 | return err; |
3897 | |
3898 | update_pkey_index(dev, slave, inbox); |
	update_gid(dev, inbox, (u8)slave);
3900 | adjust_proxy_tun_qkey(dev, vhcr, context); |
3901 | return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3902 | } |
3903 | |
3904 | int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, |
3905 | struct mlx4_vhcr *vhcr, |
3906 | struct mlx4_cmd_mailbox *inbox, |
3907 | struct mlx4_cmd_mailbox *outbox, |
3908 | struct mlx4_cmd_info *cmd) |
3909 | { |
3910 | int err; |
3911 | struct mlx4_qp_context *context = inbox->buf + 8; |
3912 | |
	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3914 | if (err) |
3915 | return err; |
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3917 | if (err) |
3918 | return err; |
3919 | |
3920 | update_pkey_index(dev, slave, inbox); |
	update_gid(dev, inbox, (u8)slave);
3922 | adjust_proxy_tun_qkey(dev, vhcr, context); |
3923 | return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3924 | } |
3925 | |
3926 | |
3927 | int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, |
3928 | struct mlx4_vhcr *vhcr, |
3929 | struct mlx4_cmd_mailbox *inbox, |
3930 | struct mlx4_cmd_mailbox *outbox, |
3931 | struct mlx4_cmd_info *cmd) |
3932 | { |
3933 | struct mlx4_qp_context *context = inbox->buf + 8; |
	int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3935 | if (err) |
3936 | return err; |
3937 | adjust_proxy_tun_qkey(dev, vhcr, context); |
3938 | return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3939 | } |
3940 | |
3941 | int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, |
3942 | struct mlx4_vhcr *vhcr, |
3943 | struct mlx4_cmd_mailbox *inbox, |
3944 | struct mlx4_cmd_mailbox *outbox, |
3945 | struct mlx4_cmd_info *cmd) |
3946 | { |
3947 | int err; |
3948 | struct mlx4_qp_context *context = inbox->buf + 8; |
3949 | |
	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3951 | if (err) |
3952 | return err; |
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3954 | if (err) |
3955 | return err; |
3956 | |
3957 | adjust_proxy_tun_qkey(dev, vhcr, context); |
	update_gid(dev, inbox, (u8)slave);
3959 | update_pkey_index(dev, slave, inbox); |
3960 | return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3961 | } |
3962 | |
3963 | int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, |
3964 | struct mlx4_vhcr *vhcr, |
3965 | struct mlx4_cmd_mailbox *inbox, |
3966 | struct mlx4_cmd_mailbox *outbox, |
3967 | struct mlx4_cmd_info *cmd) |
3968 | { |
3969 | int err; |
3970 | struct mlx4_qp_context *context = inbox->buf + 8; |
3971 | |
	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3973 | if (err) |
3974 | return err; |
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3976 | if (err) |
3977 | return err; |
3978 | |
3979 | adjust_proxy_tun_qkey(dev, vhcr, context); |
	update_gid(dev, inbox, (u8)slave);
3981 | update_pkey_index(dev, slave, inbox); |
3982 | return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3983 | } |
3984 | |
3985 | int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, |
3986 | struct mlx4_vhcr *vhcr, |
3987 | struct mlx4_cmd_mailbox *inbox, |
3988 | struct mlx4_cmd_mailbox *outbox, |
3989 | struct mlx4_cmd_info *cmd) |
3990 | { |
3991 | int err; |
3992 | int qpn = vhcr->in_modifier & 0x7fffff; |
3993 | struct res_qp *qp; |
3994 | |
	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3996 | if (err) |
3997 | return err; |
3998 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
3999 | if (err) |
4000 | goto ex_abort; |
4001 | |
	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
4008 | return 0; |
4009 | |
4010 | ex_abort: |
	res_abort_move(dev, slave, RES_QP, qpn);
4012 | |
4013 | return err; |
4014 | } |
4015 | |
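/* Look up a gid in the QP's mcg_list; callers hold rqp->mcg_spl. */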
4016 | static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, |
4017 | struct res_qp *rqp, u8 *gid) |
4018 | { |
4019 | struct res_gid *res; |
4020 | |
4021 | list_for_each_entry(res, &rqp->mcg_list, list) { |
		if (!memcmp(res->gid, gid, 16))
4023 | return res; |
4024 | } |
4025 | return NULL; |
4026 | } |
4027 | |
4028 | static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, |
4029 | u8 *gid, enum mlx4_protocol prot, |
4030 | enum mlx4_steer_type steer, u64 reg_id) |
4031 | { |
4032 | struct res_gid *res; |
4033 | int err; |
4034 | |
	res = kzalloc(sizeof(*res), GFP_KERNEL);
4036 | if (!res) |
4037 | return -ENOMEM; |
4038 | |
	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
4042 | err = -EEXIST; |
4043 | } else { |
4044 | memcpy(res->gid, gid, 16); |
4045 | res->prot = prot; |
4046 | res->steer = steer; |
4047 | res->reg_id = reg_id; |
		list_add_tail(&res->list, &rqp->mcg_list);
4049 | err = 0; |
4050 | } |
	spin_unlock_irq(&rqp->mcg_spl);
4052 | |
4053 | return err; |
4054 | } |
4055 | |
4056 | static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, |
4057 | u8 *gid, enum mlx4_protocol prot, |
4058 | enum mlx4_steer_type steer, u64 *reg_id) |
4059 | { |
4060 | struct res_gid *res; |
4061 | int err; |
4062 | |
	spin_lock_irq(&rqp->mcg_spl);
4064 | res = find_gid(dev, slave, rqp, gid); |
4065 | if (!res || res->prot != prot || res->steer != steer) |
4066 | err = -EINVAL; |
4067 | else { |
4068 | *reg_id = res->reg_id; |
		list_del(&res->list);
		kfree(res);
4071 | err = 0; |
4072 | } |
	spin_unlock_irq(&rqp->mcg_spl);
4074 | |
4075 | return err; |
4076 | } |
4077 | |
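/* Attach a QP to a multicast group according to the steering mode:
 * device-managed steering translates the request into a flow-steering
 * rule, while B0 steering patches the port byte (gid[5]) for Ethernet
 * and uses the common attach path.
 */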
4078 | static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, |
4079 | u8 gid[16], int block_loopback, enum mlx4_protocol prot, |
4080 | enum mlx4_steer_type type, u64 *reg_id) |
4081 | { |
4082 | switch (dev->caps.steering_mode) { |
4083 | case MLX4_STEERING_MODE_DEVICE_MANAGED: { |
		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4085 | if (port < 0) |
4086 | return port; |
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_loopback, prot,
						 reg_id);
4090 | } |
4091 | case MLX4_STEERING_MODE_B0: |
4092 | if (prot == MLX4_PROT_ETH) { |
			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4094 | if (port < 0) |
4095 | return port; |
4096 | gid[5] = port; |
4097 | } |
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
4100 | default: |
4101 | return -EINVAL; |
4102 | } |
4103 | } |
4104 | |
4105 | static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, |
4106 | u8 gid[16], enum mlx4_protocol prot, |
4107 | enum mlx4_steer_type type, u64 reg_id) |
4108 | { |
4109 | switch (dev->caps.steering_mode) { |
4110 | case MLX4_STEERING_MODE_DEVICE_MANAGED: |
4111 | return mlx4_flow_detach(dev, reg_id); |
4112 | case MLX4_STEERING_MODE_B0: |
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4114 | default: |
4115 | return -EINVAL; |
4116 | } |
4117 | } |
4118 | |
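/* For Ethernet steering the port is carried in gid[5]; convert it from
 * the slave's virtual numbering to the physical port before use.
 */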
4119 | static int mlx4_adjust_port(struct mlx4_dev *dev, int slave, |
4120 | u8 *gid, enum mlx4_protocol prot) |
4121 | { |
4122 | int real_port; |
4123 | |
4124 | if (prot != MLX4_PROT_ETH) |
4125 | return 0; |
4126 | |
4127 | if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || |
4128 | dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { |
		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4130 | if (real_port < 0) |
4131 | return -EINVAL; |
4132 | gid[5] = real_port; |
4133 | } |
4134 | |
4135 | return 0; |
4136 | } |
4137 | |
4138 | int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, |
4139 | struct mlx4_vhcr *vhcr, |
4140 | struct mlx4_cmd_mailbox *inbox, |
4141 | struct mlx4_cmd_mailbox *outbox, |
4142 | struct mlx4_cmd_info *cmd) |
4143 | { |
4144 | struct mlx4_qp qp; /* dummy for calling attach/detach */ |
4145 | u8 *gid = inbox->buf; |
4146 | enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; |
4147 | int err; |
4148 | int qpn; |
4149 | struct res_qp *rqp; |
4150 | u64 reg_id = 0; |
4151 | int attach = vhcr->op_modifier; |
4152 | int block_loopback = vhcr->in_modifier >> 31; |
4153 | u8 steer_type_mask = 2; |
4154 | enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; |
4155 | |
4156 | qpn = vhcr->in_modifier & 0xffffff; |
4157 | err = get_res(dev, slave, qpn, RES_QP, &rqp); |
4158 | if (err) |
4159 | return err; |
4160 | |
4161 | qp.qpn = qpn; |
4162 | if (attach) { |
		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4167 | goto ex_put; |
4168 | } |
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4170 | if (err) |
4171 | goto ex_detach; |
4172 | } else { |
4173 | err = mlx4_adjust_port(dev, slave, gid, prot); |
4174 | if (err) |
4175 | goto ex_put; |
4176 | |
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4178 | if (err) |
4179 | goto ex_put; |
4180 | |
		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
4185 | } |
	put_res(dev, slave, qpn, RES_QP);
4187 | return err; |
4188 | |
4189 | ex_detach: |
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
4193 | return err; |
4194 | } |
4195 | |
4196 | /* |
4197 | * MAC validation for Flow Steering rules. |
4198 | * VF can attach rules only with a mac address which is assigned to it. |
4199 | */ |
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
4202 | { |
4203 | struct mac_res *res, *tmp; |
4204 | __be64 be_mac; |
4205 | |
4206 | /* make sure it isn't multicast or broadcast mac*/ |
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4209 | list_for_each_entry_safe(res, tmp, rlist, list) { |
4210 | be_mac = cpu_to_be64(res->mac << 16); |
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4212 | return 0; |
4213 | } |
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4215 | eth_header->eth.dst_mac, slave); |
4216 | return -EINVAL; |
4217 | } |
4218 | return 0; |
4219 | } |
4220 | |
4221 | /* |
4222 | * In case of missing eth header, append eth header with a MAC address |
4223 | * assigned to the VF. |
4224 | */ |
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
4228 | { |
4229 | struct mac_res *res, *tmp; |
4230 | u8 port; |
4231 | struct mlx4_net_trans_rule_hw_ctrl *ctrl; |
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4235 | __be64 be_mac = 0; |
4236 | __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); |
4237 | |
4238 | ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; |
4239 | port = ctrl->port; |
4240 | eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1); |
4241 | |
4242 | /* Clear a space in the inbox for eth header */ |
4243 | switch (header_id) { |
4244 | case MLX4_NET_TRANS_RULE_ID_IPV4: |
4245 | ip_header = |
4246 | (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1); |
4247 | memmove(ip_header, eth_header, |
4248 | sizeof(*ip_header) + sizeof(*l4_header)); |
4249 | break; |
4250 | case MLX4_NET_TRANS_RULE_ID_TCP: |
4251 | case MLX4_NET_TRANS_RULE_ID_UDP: |
4252 | l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *) |
4253 | (eth_header + 1); |
4254 | memmove(l4_header, eth_header, sizeof(*l4_header)); |
4255 | break; |
4256 | default: |
4257 | return -EINVAL; |
4258 | } |
4259 | list_for_each_entry_safe(res, tmp, rlist, list) { |
4260 | if (port == res->port) { |
4261 | be_mac = cpu_to_be64(res->mac << 16); |
4262 | break; |
4263 | } |
4264 | } |
4265 | if (!be_mac) { |
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4267 | port); |
4268 | return -EINVAL; |
4269 | } |
4270 | |
4271 | memset(eth_header, 0, sizeof(*eth_header)); |
4272 | eth_header->size = sizeof(*eth_header) >> 2; |
4273 | eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]); |
4274 | memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN); |
4275 | memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN); |
4276 | |
	return 0;
}
4280 | |
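/* The only primary-address-path fields a VF may touch via UPDATE_QP */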
4281 | #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \ |
4282 | 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\ |
4283 | 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB) |
4284 | int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, |
4285 | struct mlx4_vhcr *vhcr, |
4286 | struct mlx4_cmd_mailbox *inbox, |
4287 | struct mlx4_cmd_mailbox *outbox, |
4288 | struct mlx4_cmd_info *cmd_info) |
4289 | { |
4290 | int err; |
4291 | u32 qpn = vhcr->in_modifier & 0xffffff; |
4292 | struct res_qp *rqp; |
4293 | u64 mac; |
4294 | unsigned port; |
4295 | u64 pri_addr_path_mask; |
4296 | struct mlx4_update_qp_context *cmd; |
4297 | int smac_index; |
4298 | |
4299 | cmd = (struct mlx4_update_qp_context *)inbox->buf; |
4300 | |
4301 | pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); |
4302 | if (cmd->qp_mask || cmd->secondary_addr_path_mask || |
4303 | (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED)) |
4304 | return -EPERM; |
4305 | |
4306 | if ((pri_addr_path_mask & |
4307 | (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) && |
4308 | !(dev->caps.flags2 & |
4309 | MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) { |
		mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4311 | slave); |
4312 | return -EOPNOTSUPP; |
4313 | } |
4314 | |
4315 | /* Just change the smac for the QP */ |
4316 | err = get_res(dev, slave, qpn, RES_QP, &rqp); |
4317 | if (err) { |
		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4319 | return err; |
4320 | } |
4321 | |
4322 | port = (rqp->sched_queue >> 6 & 1) + 1; |
4323 | |
4324 | if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) { |
4325 | smac_index = cmd->qp_context.pri_path.grh_mylmc; |
		err = mac_find_smac_ix_in_slave(dev, slave, port,
						smac_index, &mac);
4328 | |
4329 | if (err) { |
			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4331 | qpn, smac_index); |
4332 | goto err_mac; |
4333 | } |
4334 | } |
4335 | |
	err = mlx4_cmd(dev, inbox->dma,
		       vhcr->in_modifier, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
4340 | if (err) { |
		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4342 | goto err_mac; |
4343 | } |
4344 | |
4345 | err_mac: |
	put_res(dev, slave, qpn, RES_QP);
4347 | return err; |
4348 | } |
4349 | |
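/* Total size of an attach mailbox: the control segment plus the chain of
 * rule headers that follows it.  Each header encodes its own size in
 * 32-bit words; a zero size terminates the chain.
 */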
4350 | static u32 qp_attach_mbox_size(void *mbox) |
4351 | { |
4352 | u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl); |
	struct _rule_hw *rule_header;
4354 | |
4355 | rule_header = (struct _rule_hw *)(mbox + size); |
4356 | |
4357 | while (rule_header->size) { |
4358 | size += rule_header->size * sizeof(u32); |
4359 | rule_header += 1; |
4360 | } |
4361 | return size; |
4362 | } |
4363 | |
4364 | static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule); |
4365 | |
4366 | int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, |
4367 | struct mlx4_vhcr *vhcr, |
4368 | struct mlx4_cmd_mailbox *inbox, |
4369 | struct mlx4_cmd_mailbox *outbox, |
4370 | struct mlx4_cmd_info *cmd) |
4371 | { |
4372 | |
4373 | struct mlx4_priv *priv = mlx4_priv(dev); |
4374 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
4375 | struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; |
4376 | int err; |
4377 | int qpn; |
4378 | struct res_qp *rqp; |
4379 | struct mlx4_net_trans_rule_hw_ctrl *ctrl; |
	struct _rule_hw *rule_header;
	int header_id;
4382 | struct res_fs_rule *rrule; |
4383 | u32 mbox_size; |
4384 | |
4385 | if (dev->caps.steering_mode != |
4386 | MLX4_STEERING_MODE_DEVICE_MANAGED) |
4387 | return -EOPNOTSUPP; |
4388 | |
4389 | ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; |
	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4391 | if (err <= 0) |
4392 | return -EINVAL; |
4393 | ctrl->port = err; |
4394 | qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; |
4395 | err = get_res(dev, slave, qpn, RES_QP, &rqp); |
4396 | if (err) { |
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4398 | return err; |
4399 | } |
4400 | rule_header = (struct _rule_hw *)(ctrl + 1); |
4401 | header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); |
4402 | |
4403 | if (header_id == MLX4_NET_TRANS_RULE_ID_ETH) |
		mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4405 | |
4406 | switch (header_id) { |
4407 | case MLX4_NET_TRANS_RULE_ID_ETH: |
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
4409 | err = -EINVAL; |
4410 | goto err_put_qp; |
4411 | } |
4412 | break; |
4413 | case MLX4_NET_TRANS_RULE_ID_IB: |
4414 | break; |
4415 | case MLX4_NET_TRANS_RULE_ID_IPV4: |
4416 | case MLX4_NET_TRANS_RULE_ID_TCP: |
4417 | case MLX4_NET_TRANS_RULE_ID_UDP: |
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4419 | if (add_eth_header(dev, slave, inbox, rlist, header_id)) { |
4420 | err = -EINVAL; |
4421 | goto err_put_qp; |
4422 | } |
4423 | vhcr->in_modifier += |
4424 | sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; |
4425 | break; |
4426 | default: |
		pr_err("Corrupted mailbox\n");
4428 | err = -EINVAL; |
4429 | goto err_put_qp; |
4430 | } |
4431 | |
	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
4436 | if (err) |
4437 | goto err_put_qp; |
4438 | |
	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4441 | if (err) { |
		mlx4_err(dev, "Fail to add flow steering resources\n");
4443 | goto err_detach; |
4444 | } |
4445 | |
4446 | err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule); |
4447 | if (err) |
4448 | goto err_detach; |
4449 | |
	mbox_size = qp_attach_mbox_size(inbox->buf);
	rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4452 | if (!rrule->mirr_mbox) { |
4453 | err = -ENOMEM; |
4454 | goto err_put_rule; |
4455 | } |
4456 | rrule->mirr_mbox_size = mbox_size; |
4457 | rrule->mirr_rule_id = 0; |
4458 | memcpy(rrule->mirr_mbox, inbox->buf, mbox_size); |
4459 | |
4460 | /* set different port */ |
4461 | ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox; |
4462 | if (ctrl->port == 1) |
4463 | ctrl->port = 2; |
4464 | else |
4465 | ctrl->port = 1; |
4466 | |
4467 | if (mlx4_is_bonded(dev)) |
		mlx4_do_mirror_rule(dev, rrule);
4469 | |
	atomic_inc(&rqp->ref_count);
4471 | |
4472 | err_put_rule: |
	put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4474 | err_detach: |
4475 | /* detach rule on error */ |
4476 | if (err) |
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
4480 | err_put_qp: |
	put_res(dev, slave, qpn, RES_QP);
4482 | return err; |
4483 | } |
4484 | |
4485 | static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule) |
4486 | { |
4487 | int err; |
4488 | |
	err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4490 | if (err) { |
		mlx4_err(dev, "Fail to remove flow steering resources\n");
4492 | return err; |
4493 | } |
4494 | |
	mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4497 | return 0; |
4498 | } |
4499 | |
4500 | int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, |
4501 | struct mlx4_vhcr *vhcr, |
4502 | struct mlx4_cmd_mailbox *inbox, |
4503 | struct mlx4_cmd_mailbox *outbox, |
4504 | struct mlx4_cmd_info *cmd) |
4505 | { |
4506 | int err; |
4507 | struct res_qp *rqp; |
4508 | struct res_fs_rule *rrule; |
4509 | u64 mirr_reg_id; |
4510 | int qpn; |
4511 | |
4512 | if (dev->caps.steering_mode != |
4513 | MLX4_STEERING_MODE_DEVICE_MANAGED) |
4514 | return -EOPNOTSUPP; |
4515 | |
4516 | err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); |
4517 | if (err) |
4518 | return err; |
4519 | |
4520 | if (!rrule->mirr_mbox) { |
		mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
		put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4523 | return -EINVAL; |
4524 | } |
4525 | mirr_reg_id = rrule->mirr_rule_id; |
	kfree(rrule->mirr_mbox);
4527 | qpn = rrule->qpn; |
4528 | |
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4531 | err = get_res(dev, slave, qpn, RES_QP, &rqp); |
4532 | if (err) |
4533 | return err; |
4534 | |
4535 | if (mirr_reg_id && mlx4_is_bonded(dev)) { |
4536 | err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule); |
4537 | if (err) { |
			mlx4_err(dev, "Fail to get resource of mirror rule\n");
4539 | } else { |
			put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
			mlx4_undo_mirror_rule(dev, rrule);
4542 | } |
4543 | } |
	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4545 | if (err) { |
		mlx4_err(dev, "Fail to remove flow steering resources\n");
4547 | goto out; |
4548 | } |
4549 | |
	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
4553 | if (!err) |
		atomic_dec(&rqp->ref_count);
4555 | out: |
	put_res(dev, slave, qpn, RES_QP);
4557 | return err; |
4558 | } |
4559 | |
4560 | enum { |
4561 | BUSY_MAX_RETRIES = 10 |
4562 | }; |
4563 | |
4564 | int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, |
4565 | struct mlx4_vhcr *vhcr, |
4566 | struct mlx4_cmd_mailbox *inbox, |
4567 | struct mlx4_cmd_mailbox *outbox, |
4568 | struct mlx4_cmd_info *cmd) |
4569 | { |
4570 | int err; |
4571 | int index = vhcr->in_modifier & 0xffff; |
4572 | |
4573 | err = get_res(dev, slave, index, RES_COUNTER, NULL); |
4574 | if (err) |
4575 | return err; |
4576 | |
4577 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); |
	put_res(dev, slave, index, RES_COUNTER);
4579 | return err; |
4580 | } |
4581 | |
4582 | static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) |
4583 | { |
4584 | struct res_gid *rgid; |
4585 | struct res_gid *tmp; |
4586 | struct mlx4_qp qp; /* dummy for calling attach/detach */ |
4587 | |
4588 | list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { |
4589 | switch (dev->caps.steering_mode) { |
4590 | case MLX4_STEERING_MODE_DEVICE_MANAGED: |
			mlx4_flow_detach(dev, rgid->reg_id);
4592 | break; |
4593 | case MLX4_STEERING_MODE_B0: |
4594 | qp.qpn = rqp->local_qpn; |
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
4597 | break; |
4598 | } |
		list_del(&rgid->list);
		kfree(rgid);
4601 | } |
4602 | } |
4603 | |
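/* One pass over the slave's resources of @type: mark each free resource
 * as busy and flag it for removal, and count the ones some other thread
 * still holds busy.  move_all_busy() below retries this for up to five
 * seconds before giving up and reporting the stragglers.
 */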
4604 | static int _move_all_busy(struct mlx4_dev *dev, int slave, |
4605 | enum mlx4_resource type, int print) |
4606 | { |
4607 | struct mlx4_priv *priv = mlx4_priv(dev); |
4608 | struct mlx4_resource_tracker *tracker = |
4609 | &priv->mfunc.master.res_tracker; |
4610 | struct list_head *rlist = &tracker->slave_list[slave].res_list[type]; |
4611 | struct res_common *r; |
4612 | struct res_common *tmp; |
4613 | int busy; |
4614 | |
4615 | busy = 0; |
	spin_lock_irq(mlx4_tlock(dev));
4617 | list_for_each_entry_safe(r, tmp, rlist, list) { |
4618 | if (r->owner == slave) { |
4619 | if (!r->removing) { |
4620 | if (r->state == RES_ANY_BUSY) { |
4621 | if (print) |
					mlx4_dbg(dev,
						 "%s id 0x%llx is busy\n",
						 resource_str(type),
						 r->res_id);
4626 | ++busy; |
4627 | } else { |
4628 | r->from_state = r->state; |
4629 | r->state = RES_ANY_BUSY; |
4630 | r->removing = 1; |
4631 | } |
4632 | } |
4633 | } |
4634 | } |
	spin_unlock_irq(mlx4_tlock(dev));
4636 | |
4637 | return busy; |
4638 | } |
4639 | |
4640 | static int move_all_busy(struct mlx4_dev *dev, int slave, |
4641 | enum mlx4_resource type) |
4642 | { |
4643 | unsigned long begin; |
4644 | int busy; |
4645 | |
4646 | begin = jiffies; |
4647 | do { |
		busy = _move_all_busy(dev, slave, type, 0);
4649 | if (time_after(jiffies, begin + 5 * HZ)) |
4650 | break; |
4651 | if (busy) |
4652 | cond_resched(); |
4653 | } while (busy); |
4654 | |
4655 | if (busy) |
		busy = _move_all_busy(dev, slave, type, 1);
4657 | |
4658 | return busy; |
4659 | } |
4660 | static void rem_slave_qps(struct mlx4_dev *dev, int slave) |
4661 | { |
4662 | struct mlx4_priv *priv = mlx4_priv(dev); |
4663 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
4664 | struct list_head *qp_list = |
4665 | &tracker->slave_list[slave].res_list[RES_QP]; |
4666 | struct res_qp *qp; |
4667 | struct res_qp *tmp; |
4668 | int state; |
4669 | u64 in_param; |
4670 | int qpn; |
4671 | int err; |
4672 | |
	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4676 | slave); |
4677 | |
	spin_lock_irq(mlx4_tlock(dev));
4679 | list_for_each_entry_safe(qp, tmp, qp_list, com.list) { |
		spin_unlock_irq(mlx4_tlock(dev));
4681 | if (qp->com.owner == slave) { |
4682 | qpn = qp->com.res_id; |
			detach_qp(dev, slave, qp);
4684 | state = qp->com.from_state; |
4685 | while (state != 0) { |
4686 | switch (state) { |
4687 | case RES_QP_RESERVED: |
					spin_lock_irq(mlx4_tlock(dev));
4689 | rb_erase(&qp->com.node, |
4690 | &tracker->res_tree[RES_QP]); |
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
4699 | state = 0; |
4700 | break; |
4701 | case RES_QP_MAPPED: |
4702 | if (!valid_reserved(dev, slave, qpn)) |
4703 | __mlx4_qp_free_icm(dev, qpn); |
4704 | state = RES_QP_RESERVED; |
4705 | break; |
4706 | case RES_QP_HW: |
4707 | in_param = slave; |
				err = mlx4_cmd(dev, in_param,
					       qp->local_qpn, 2,
					       MLX4_CMD_2RST_QP,
					       MLX4_CMD_TIME_CLASS_A,
					       MLX4_CMD_NATIVE);
4713 | if (err) |
					mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4715 | slave, qp->local_qpn); |
				atomic_dec(&qp->rcq->ref_count);
				atomic_dec(&qp->scq->ref_count);
				atomic_dec(&qp->mtt->ref_count);
				if (qp->srq)
					atomic_dec(&qp->srq->ref_count);
4721 | state = RES_QP_MAPPED; |
4722 | break; |
4723 | default: |
4724 | state = 0; |
4725 | } |
4726 | } |
4727 | } |
		spin_lock_irq(mlx4_tlock(dev));
4729 | } |
	spin_unlock_irq(mlx4_tlock(dev));
4731 | } |
4732 | |
4733 | static void rem_slave_srqs(struct mlx4_dev *dev, int slave) |
4734 | { |
4735 | struct mlx4_priv *priv = mlx4_priv(dev); |
4736 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
4737 | struct list_head *srq_list = |
4738 | &tracker->slave_list[slave].res_list[RES_SRQ]; |
4739 | struct res_srq *srq; |
4740 | struct res_srq *tmp; |
4741 | int state; |
4742 | u64 in_param; |
4743 | int srqn; |
4744 | int err; |
4745 | |
	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4749 | slave); |
4750 | |
	spin_lock_irq(mlx4_tlock(dev));
4752 | list_for_each_entry_safe(srq, tmp, srq_list, com.list) { |
		spin_unlock_irq(mlx4_tlock(dev));
4754 | if (srq->com.owner == slave) { |
4755 | srqn = srq->com.res_id; |
4756 | state = srq->com.from_state; |
4757 | while (state != 0) { |
4758 | switch (state) { |
4759 | case RES_SRQ_ALLOCATED: |
4760 | __mlx4_srq_free_icm(dev, srqn); |
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
4769 | state = 0; |
4770 | break; |
4771 | |
4772 | case RES_SRQ_HW: |
4773 | in_param = slave; |
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
4778 | if (err) |
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4780 | slave, srqn); |
4781 | |
					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
4785 | state = RES_SRQ_ALLOCATED; |
4786 | break; |
4787 | |
4788 | default: |
4789 | state = 0; |
4790 | } |
4791 | } |
4792 | } |
		spin_lock_irq(mlx4_tlock(dev));
4794 | } |
	spin_unlock_irq(mlx4_tlock(dev));
4796 | } |
4797 | |
4798 | static void rem_slave_cqs(struct mlx4_dev *dev, int slave) |
4799 | { |
4800 | struct mlx4_priv *priv = mlx4_priv(dev); |
4801 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
4802 | struct list_head *cq_list = |
4803 | &tracker->slave_list[slave].res_list[RES_CQ]; |
4804 | struct res_cq *cq; |
4805 | struct res_cq *tmp; |
4806 | int state; |
4807 | u64 in_param; |
4808 | int cqn; |
4809 | int err; |
4810 | |
	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4814 | slave); |
4815 | |
	spin_lock_irq(mlx4_tlock(dev));
4817 | list_for_each_entry_safe(cq, tmp, cq_list, com.list) { |
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4820 | cqn = cq->com.res_id; |
4821 | state = cq->com.from_state; |
4822 | while (state != 0) { |
4823 | switch (state) { |
4824 | case RES_CQ_ALLOCATED: |
4825 | __mlx4_cq_free_icm(dev, cqn); |
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
4834 | state = 0; |
4835 | break; |
4836 | |
4837 | case RES_CQ_HW: |
4838 | in_param = slave; |
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
4843 | if (err) |
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4845 | slave, cqn); |
					atomic_dec(&cq->mtt->ref_count);
4847 | state = RES_CQ_ALLOCATED; |
4848 | break; |
4849 | |
4850 | default: |
4851 | state = 0; |
4852 | } |
4853 | } |
4854 | } |
		spin_lock_irq(mlx4_tlock(dev));
4856 | } |
	spin_unlock_irq(mlx4_tlock(dev));
4858 | } |
4859 | |
4860 | static void rem_slave_mrs(struct mlx4_dev *dev, int slave) |
4861 | { |
4862 | struct mlx4_priv *priv = mlx4_priv(dev); |
4863 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
4864 | struct list_head *mpt_list = |
4865 | &tracker->slave_list[slave].res_list[RES_MPT]; |
4866 | struct res_mpt *mpt; |
4867 | struct res_mpt *tmp; |
4868 | int state; |
4869 | u64 in_param; |
4870 | int mptn; |
4871 | int err; |
4872 | |
	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4876 | slave); |
4877 | |
	spin_lock_irq(mlx4_tlock(dev));
4879 | list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { |
		spin_unlock_irq(mlx4_tlock(dev));
4881 | if (mpt->com.owner == slave) { |
4882 | mptn = mpt->com.res_id; |
4883 | state = mpt->com.from_state; |
4884 | while (state != 0) { |
4885 | switch (state) { |
4886 | case RES_MPT_RESERVED: |
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
4896 | state = 0; |
4897 | break; |
4898 | |
4899 | case RES_MPT_MAPPED: |
					__mlx4_mpt_free_icm(dev, mpt->key);
4901 | state = RES_MPT_RESERVED; |
4902 | break; |
4903 | |
4904 | case RES_MPT_HW: |
4905 | in_param = slave; |
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
4910 | if (err) |
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4912 | slave, mptn); |
4913 | if (mpt->mtt) |
						atomic_dec(&mpt->mtt->ref_count);
4915 | state = RES_MPT_MAPPED; |
4916 | break; |
4917 | default: |
4918 | state = 0; |
4919 | } |
4920 | } |
4921 | } |
		spin_lock_irq(mlx4_tlock(dev));
4923 | } |
	spin_unlock_irq(mlx4_tlock(dev));
4925 | } |
4926 | |
4927 | static void rem_slave_mtts(struct mlx4_dev *dev, int slave) |
4928 | { |
4929 | struct mlx4_priv *priv = mlx4_priv(dev); |
4930 | struct mlx4_resource_tracker *tracker = |
4931 | &priv->mfunc.master.res_tracker; |
4932 | struct list_head *mtt_list = |
4933 | &tracker->slave_list[slave].res_list[RES_MTT]; |
4934 | struct res_mtt *mtt; |
4935 | struct res_mtt *tmp; |
4936 | int state; |
4937 | int base; |
4938 | int err; |
4939 | |
	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4943 | slave); |
4944 | |
	spin_lock_irq(mlx4_tlock(dev));
4946 | list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { |
		spin_unlock_irq(mlx4_tlock(dev));
4948 | if (mtt->com.owner == slave) { |
4949 | base = mtt->com.res_id; |
4950 | state = mtt->com.from_state; |
4951 | while (state != 0) { |
4952 | switch (state) { |
4953 | case RES_MTT_ALLOCATED: |
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
4964 | state = 0; |
4965 | break; |
4966 | |
4967 | default: |
4968 | state = 0; |
4969 | } |
4970 | } |
4971 | } |
		spin_lock_irq(mlx4_tlock(dev));
4973 | } |
	spin_unlock_irq(mlx4_tlock(dev));
4975 | } |
4976 | |
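/* Re-register an attached flow-steering rule on the other port, using the
 * mailbox copy saved at attach time.  The mirror rule is tracked with no
 * mirr_mbox of its own, which is what marks it as a mirror (and prevents
 * a slave from detaching it explicitly).
 */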
4977 | static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule) |
4978 | { |
4979 | struct mlx4_cmd_mailbox *mailbox; |
4980 | int err; |
4981 | struct res_fs_rule *mirr_rule; |
4982 | u64 reg_id; |
4983 | |
4984 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
4987 | |
4988 | if (!fs_rule->mirr_mbox) { |
		mlx4_err(dev, "rule mirroring mailbox is null\n");
4990 | mlx4_free_cmd_mailbox(dev, mailbox); |
4991 | return -EINVAL; |
4992 | } |
4993 | memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size); |
	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
4997 | mlx4_free_cmd_mailbox(dev, mailbox); |
4998 | |
4999 | if (err) |
5000 | goto err; |
5001 | |
	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
5003 | if (err) |
5004 | goto err_detach; |
5005 | |
5006 | err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule); |
5007 | if (err) |
5008 | goto err_rem; |
5009 | |
5010 | fs_rule->mirr_rule_id = reg_id; |
5011 | mirr_rule->mirr_rule_id = 0; |
5012 | mirr_rule->mirr_mbox_size = 0; |
5013 | mirr_rule->mirr_mbox = NULL; |
	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
5015 | |
5016 | return 0; |
5017 | err_rem: |
	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
5019 | err_detach: |
	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
5022 | err: |
5023 | return err; |
5024 | } |
5025 | |
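/* On bond, mirror every original rule (those with a saved mailbox) onto
 * the other port; on unbond, remove every mirror rule (those without one).
 * Errors are accumulated so a single failure doesn't stop the walk.
 */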
5026 | static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond) |
5027 | { |
5028 | struct mlx4_priv *priv = mlx4_priv(dev); |
5029 | struct mlx4_resource_tracker *tracker = |
5030 | &priv->mfunc.master.res_tracker; |
5031 | struct rb_root *root = &tracker->res_tree[RES_FS_RULE]; |
5032 | struct rb_node *p; |
5033 | struct res_fs_rule *fs_rule; |
5034 | int err = 0; |
5035 | LIST_HEAD(mirr_list); |
5036 | |
5037 | for (p = rb_first(root); p; p = rb_next(p)) { |
5038 | fs_rule = rb_entry(p, struct res_fs_rule, com.node); |
5039 | if ((bond && fs_rule->mirr_mbox_size) || |
5040 | (!bond && !fs_rule->mirr_mbox_size)) |
			list_add_tail(&fs_rule->mirr_list, &mirr_list);
5042 | } |
5043 | |
5044 | list_for_each_entry(fs_rule, &mirr_list, mirr_list) { |
5045 | if (bond) |
5046 | err += mlx4_do_mirror_rule(dev, fs_rule); |
5047 | else |
5048 | err += mlx4_undo_mirror_rule(dev, fs_rule); |
5049 | } |
5050 | return err; |
5051 | } |
5052 | |
5053 | int mlx4_bond_fs_rules(struct mlx4_dev *dev) |
5054 | { |
	return mlx4_mirror_fs_rules(dev, true);
5056 | } |
5057 | |
5058 | int mlx4_unbond_fs_rules(struct mlx4_dev *dev) |
5059 | { |
	return mlx4_mirror_fs_rules(dev, false);
5061 | } |
5062 | |
5063 | static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) |
5064 | { |
5065 | struct mlx4_priv *priv = mlx4_priv(dev); |
5066 | struct mlx4_resource_tracker *tracker = |
5067 | &priv->mfunc.master.res_tracker; |
5068 | struct list_head *fs_rule_list = |
5069 | &tracker->slave_list[slave].res_list[RES_FS_RULE]; |
5070 | struct res_fs_rule *fs_rule; |
5071 | struct res_fs_rule *tmp; |
5072 | int state; |
5073 | u64 base; |
5074 | int err; |
5075 | |
	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);
5080 | |
	spin_lock_irq(mlx4_tlock(dev));
5082 | list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) { |
		spin_unlock_irq(mlx4_tlock(dev));
5084 | if (fs_rule->com.owner == slave) { |
5085 | base = fs_rule->com.res_id; |
5086 | state = fs_rule->com.from_state; |
5087 | while (state != 0) { |
5088 | switch (state) { |
5089 | case RES_FS_RULE_ALLOCATED: |
5090 | /* detach rule */ |
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
5095 | |
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule->mirr_mbox);
					kfree(fs_rule);
5103 | state = 0; |
5104 | break; |
5105 | |
5106 | default: |
5107 | state = 0; |
5108 | } |
5109 | } |
5110 | } |
		spin_lock_irq(mlx4_tlock(dev));
5112 | } |
	spin_unlock_irq(mlx4_tlock(dev));
5114 | } |
5115 | |
5116 | static void rem_slave_eqs(struct mlx4_dev *dev, int slave) |
5117 | { |
5118 | struct mlx4_priv *priv = mlx4_priv(dev); |
5119 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
5120 | struct list_head *eq_list = |
5121 | &tracker->slave_list[slave].res_list[RES_EQ]; |
5122 | struct res_eq *eq; |
5123 | struct res_eq *tmp; |
5124 | int err; |
5125 | int state; |
5126 | int eqn; |
5127 | |
	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5131 | slave); |
5132 | |
	spin_lock_irq(mlx4_tlock(dev));
5134 | list_for_each_entry_safe(eq, tmp, eq_list, com.list) { |
		spin_unlock_irq(mlx4_tlock(dev));
5136 | if (eq->com.owner == slave) { |
5137 | eqn = eq->com.res_id; |
5138 | state = eq->com.from_state; |
5139 | while (state != 0) { |
5140 | switch (state) { |
5141 | case RES_EQ_RESERVED: |
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
5148 | state = 0; |
5149 | break; |
5150 | |
5151 | case RES_EQ_HW: |
					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
5156 | if (err) |
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5158 | slave, eqn & 0x3ff); |
					atomic_dec(&eq->mtt->ref_count);
5160 | state = RES_EQ_RESERVED; |
5161 | break; |
5162 | |
5163 | default: |
5164 | state = 0; |
5165 | } |
5166 | } |
5167 | } |
		spin_lock_irq(mlx4_tlock(dev));
5169 | } |
	spin_unlock_irq(mlx4_tlock(dev));
5171 | } |
5172 | |
5173 | static void rem_slave_counters(struct mlx4_dev *dev, int slave) |
5174 | { |
5175 | struct mlx4_priv *priv = mlx4_priv(dev); |
5176 | struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; |
5177 | struct list_head *counter_list = |
5178 | &tracker->slave_list[slave].res_list[RES_COUNTER]; |
5179 | struct res_counter *counter; |
5180 | struct res_counter *tmp; |
5181 | int err; |
5182 | int *counters_arr = NULL; |
5183 | int i, j; |
5184 | |
	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5188 | slave); |
5189 | |
	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
5192 | if (!counters_arr) |
5193 | return; |
5194 | |
5195 | do { |
5196 | i = 0; |
5197 | j = 0; |
		spin_lock_irq(mlx4_tlock(dev));
5199 | list_for_each_entry_safe(counter, tmp, counter_list, com.list) { |
5200 | if (counter->com.owner == slave) { |
5201 | counters_arr[i++] = counter->com.res_id; |
5202 | rb_erase(&counter->com.node, |
5203 | &tracker->res_tree[RES_COUNTER]); |
5204 | list_del(entry: &counter->com.list); |
5205 | kfree(objp: counter); |
5206 | } |
5207 | } |
5208 | spin_unlock_irq(lock: mlx4_tlock(dev)); |
5209 | |
5210 | while (j < i) { |
5211 | __mlx4_counter_free(dev, idx: counters_arr[j++]); |
5212 | mlx4_release_resource(dev, slave, res_type: RES_COUNTER, count: 1, port: 0); |
5213 | } |
5214 | } while (i); |
5215 | |
5216 | kfree(objp: counters_arr); |
5217 | } |
5218 | |
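/* Release all XRC domains owned by @slave and return their numbers to
 * the free pool.
 */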
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

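/* Tear down everything a slave still owns, in dependency order (flow
 * rules and QPs before the CQs, SRQs and MTTs they reference), while
 * holding that slave's resource-tracker mutex.
 */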
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

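/* Request a QoS vport change as part of an UPDATE_QP command by setting
 * the QOS_VPP mask bit and the new vport value in the context.
 */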
static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
			   struct mlx4_vf_immed_vlan_work *work)
{
	ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
	ctx->qp_context.qos_vport = work->qos_vport;
}

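/* Work handler that applies an immediate VLAN/QoS change to every
 * eligible QP owned by a VF on the affected port, one UPDATE_QP
 * firmware command per QP. Must run on the PF; slaves bail out early.
 */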
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
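	/* Select which frame types the QPs may send/receive: block all
	 * traffic when the link is administratively disabled, block tagged
	 * frames when no VLAN is set, otherwise apply VST filtering for
	 * 802.1ad or 802.1Q respectively.
	 */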
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (work->vlan_proto == htons(ETH_P_8021AD))
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
	else /* vst 802.1Q */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
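			/* bit 6 of sched_queue selects the port (0-based) */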
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
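			/* bits 16..23 of qpc_flags hold the QP service type;
			 * RC QPs get only the base path mask, leaving their
			 * vlan-control fields untouched.
			 */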
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
				if (work->vlan_proto == htons(ETH_P_8021AD))
					upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
				else
					upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);

				if (dev->caps.flags2 &
				    MLX4_DEV_CAP_FLAG2_QOS_VPP)
					update_qos_vpp(upd_context, work);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}
