/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);

	return async_ev_mask;
}

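/* Ring the EQ doorbell: bits 23:0 carry the current consumer index, and
 * bit 31, when set, asks the HCA to generate an interrupt on the next
 * event (i.e. it re-arms the EQ).
 */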
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
				u8 eqe_size)
{
	/* (entry & (eq->nent - 1)) gives us a cyclic array */
	unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;

	/* CX3 is capable of extending the EQE from 32 to 64 bytes with
	 * strides of 64B, 128B and 256B.
	 * When the 64B EQE is used, the first (in the lower addresses)
	 * 32 bytes of the 64-byte EQE are reserved and the next 32 bytes
	 * contain the legacy EQE information.
	 * In all other cases, the first 32B contain the legacy EQE info.
	 */
	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}

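/* An EQE is ready for software when its ownership bit matches the "pass
 * parity" of the consumer index (the eq->nent bit of cons_index).  The
 * value hardware writes to the ownership bit flips on every pass through
 * the circular buffer, so a mismatch means the entry is still owned by HW.
 */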
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);

	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];

	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i, phys_port, slave_port;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	     eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE &&
		    eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN &&
		    mlx4_is_bonded(dev)) {
			struct mlx4_port_cap port_cap;

			if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state)
				goto consume;

			if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state)
				goto consume;
		}
		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i <= dev->persist->num_vfs; i++) {
				phys_port = 0;
				if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
				    eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
					phys_port = eqe->event.port_mgmt_change.port;
					slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
					if (slave_port < 0) /* VF doesn't have this port */
						continue;
					eqe->event.port_mgmt_change.port = slave_port;
				}
				if (mlx4_GEN_EQE(dev, i, eqe))
					mlx4_warn(dev, "Failed to generate event for slave %d\n",
						  i);
				if (phys_port)
					eqe->event.port_mgmt_change.port = phys_port;
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
consume:
		++slave_eq->cons;
	}
}

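/* Queue a copy of an EQE on the master's software slave event queue, which
 * uses the same ownership-bit convention as a hardware EQ, and schedule
 * the work that delivers it to the slave(s) via mlx4_GEN_EQE().
 */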
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe;
	unsigned long flags;

	spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}

	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	dma_wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return;

	slave_event(dev, slave, eqe);
}

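/* Publish this EQ's preferred CPU mask through the IRQ affinity hint so
 * that tools such as irqbalance can honor it.
 */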
#if defined(CONFIG_SMP)
static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
{
	int hint_err;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_eq *eq = &priv->eq_table.eq[vec];

	if (!cpumask_available(eq->affinity_mask) ||
	    cpumask_empty(eq->affinity_mask))
		return;

	hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
	if (hint_err)
		mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
}
#endif

int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

	if (!s_slave->active)
		return 0;

	memset(&eqe, 0, sizeof(eqe));

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);

int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof(eqe));

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);

int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change)
{
	struct mlx4_eqe eqe;
	u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof(eqe));

	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
	eqe.subtype = port_subtype_change;
	eqe.event.port_change.port = cpu_to_be32(slave_port << 28);

	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
		 port_subtype_change, slave, port);
	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);

enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return SLAVE_PORT_DOWN;
	}
	return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);

static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				     enum slave_port_state state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
	}
	s_state[slave].port_state[port] = state;

	return 0;
}

static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;
	struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
									  port);

	for (i = 0; i < dev->persist->num_vfs + 1; i++)
		if (test_bit(i, slaves_pport.slaves))
			set_and_calc_slave_port_state(dev, i, port,
						      event, &gen_event);
}
/**************************************************************************
	The function gets the new event for a port as input,
	and changes the slave's port state according to the previous state.
	The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
		MLX4_PORT_STATE_IB_EVENT_GID_VALID
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = -1;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);

	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
	}

	ctx = &priv->mfunc.master.slave_state[slave];
	spin_lock_irqsave(&ctx->lock, flags);

	switch (cur_state) {
	case SLAVE_PORT_DOWN:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
		break;
	case SLAVE_PENDING_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
		}
		break;
	case SLAVE_PORT_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
				event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		}
		break;
	default:
		pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
		       __func__, slave, port);
		goto out;
	}
	ret = mlx4_get_slave_port_state(dev, slave, port);

out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);

int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
	struct mlx4_eqe eqe;

	memset(&eqe, 0, sizeof(eqe));

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
	eqe.event.port_mgmt_change.port = port;
	eqe.event.port_mgmt_change.params.port_info.changed_attr =
		cpu_to_be32((u32) attr);

	slave_event(dev, ALL_SLAVES, &eqe);
	return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;
	unsigned long flags;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);
			/* In case of 'Reset flow' FLR can be generated for
			 * a slave before mlx4_load_one is done.
			 * Make sure the interface is up before trying to delete
			 * slave resources which weren't allocated yet.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_UP)
				mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			/* notify the FW */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}

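/* Poll one EQ: consume EQEs while software owns them and dispatch each by
 * event type, forwarding resource events to the owning slave when running
 * as master.  The consumer index is pushed to hardware at least every
 * MLX4_NUM_SPARE_EQE entries so the HCA never sees an overflow, and the
 * EQ is re-armed on exit.
 */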
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	int flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;
	unsigned long flags;
	struct mlx4_vport_state *s_info;
	int eqe_size = dev->caps.eqe_size;

	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}

			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
				 __func__, be32_to_cpu(eqe->event.srq.srqn),
				 eq->eqn);
			fallthrough;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				if (eqe->type ==
				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
					mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
						  __func__, slave,
						  be32_to_cpu(eqe->event.srq.srqn),
						  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					if (eqe->type ==
					    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
						mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
							  __func__, eqe->type,
							  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE: {
			struct mlx4_slaves_pport slaves_port;

			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			slaves_port = mlx4_phys_to_slaves_pport(dev, port);
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(
					dev, MLX4_DEV_EVENT_PORT_DOWN, &port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->persist->num_vfs + 1;
				     i++) {
					int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

					if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
						continue;
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we can be in a pending state; in that
						 * case do not send the port_down event
						 */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
						    &port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0;
					     i < dev->persist->num_vfs + 1;
					     i++) {
						int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

						if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
							continue;
						if (i == mlx4_master_func_num(dev))
							continue;
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				else /* IB port */
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;
		}

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_CQ,
						be32_to_cpu(eqe->event.cq_err.cqn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_OP_REQUIRED:
			atomic_inc(&priv->opreq_count);
			/* FW commands can't be executed from interrupt
			 * context; defer the work to a task.
			 */
			queue_work(mlx4_wq, &priv->opreq_task);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof(eqe->event.comm_channel_arm.bit_vec));
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
					    &flr_slave);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
							 __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(
				dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, eqe);
			break;

		case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
			switch (eqe->subtype) {
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
				mlx4_warn(dev, "Bad cable detected on port %u\n",
					  eqe->event.bad_cable.port);
				break;
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
				mlx4_warn(dev, "Unsupported cable detected\n");
				break;
			default:
				mlx4_dbg(dev,
					 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
					 eqe->type, eqe->subtype, eq->eqn,
					 eq->cons_index, eqe->owner, eq->nent,
					 !!(eqe->owner & 0x80) ^
					 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
				break;
			}
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq *eq = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x3FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}

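/* MAP_EQ maps (or, when the unmap bit is set in the in_modifier, unmaps)
 * the asynchronous events selected by event_mask, one bit per
 * MLX4_EVENT_TYPE_* value, to the given EQ number.
 */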
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
		       int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
{
	return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
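	 * For example (illustrative numbers only): with reserved_eqs = 8
	 * and num_comp_vectors = 6, EQs 8..14 are in use; they fall into
	 * doorbell groups 2 and 3, so two UAR pages are mapped.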
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
		dev->caps.reserved_eqs / 4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(
				pci_resource_start(dev->persist->pdev, 2) +
				((eq->eqn / 4) << (dev->uar_page_shift)),
				(1 << (dev->uar_page_shift)));
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}

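/* Create one EQ: round the requested number of entries up to a power of
 * two, back the queue with page-sized coherent DMA buffers described by
 * an MTT, and hand it over to firmware with SW2HW_EQ.
 */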
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B, 128B and 256B.
	 */
	npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;

	eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
				      GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
							  pdev->dev,
							  PAGE_SIZE, &t,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
	spin_lock_init(&eq->tasklet_ctx.lock);
	tasklet_setup(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb);

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B, 128B and 256B.
	 */
	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;

	err = mlx4_HW2SW_EQ(dev, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	synchronize_irq(eq->irq);
	tasklet_disable(&eq->tasklet_ctx.task);

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->persist->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_cpumask_var(eq_table->eq[i].affinity_mask);
			irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
				 priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof(*priv->eq_table.eq), GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

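/* Bring up the whole EQ table: map the doorbell UARs and (on the PF) the
 * interrupt clear register, create the async EQ plus one EQ per completion
 * vector, request the async MSI-X interrupt or the shared legacy INTx
 * interrupt, and map the async event mask to the async EQ.
 */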
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof(*priv->eq_table.uar_map),
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap,
			       roundup_pow_of_two(dev->caps.num_eqs),
			       dev->caps.num_eqs - 1,
			       dev->caps.reserved_eqs,
			       roundup_pow_of_two(dev->caps.num_eqs) -
			       dev->caps.num_eqs);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc_array(MLX4_IRQNAME_SIZE,
			      (dev->caps.num_comp_vectors + 1),
			      GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_clr_int;
	}

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
		if (i == MLX4_EQ_ASYNC) {
			err = mlx4_create_eq(dev,
					     MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
					     0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
		} else {
			struct mlx4_eq *eq = &priv->eq_table.eq[i];
#ifdef CONFIG_RFS_ACCEL
			int port = find_first_bit(eq->actv_ports.ports,
						  dev->caps.num_ports) + 1;

			if (port <= dev->caps.num_ports) {
				struct mlx4_port_info *info =
					&mlx4_priv(dev)->port[port];

				if (!info->rmap) {
					info->rmap = alloc_irq_cpu_rmap(
						mlx4_get_eqs_per_port(dev, port));
					if (!info->rmap) {
						mlx4_warn(dev, "Failed to allocate cpu rmap\n");
						err = -ENOMEM;
						goto err_out_unmap;
					}
				}

				err = irq_cpu_rmap_add(
					info->rmap, eq->irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			err = mlx4_create_eq(dev, dev->quotas.cq +
					     MLX4_NUM_SPARE_EQE,
					     (dev->flags & MLX4_FLAG_MSI_X) ?
					     i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
					     eq);
		}
		if (err)
			goto err_out_unmap;
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		snprintf(priv->eq_table.irq_names +
			 MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE,
			 "mlx4-async@pci:%s",
			 pci_name(dev->persist->pdev));
		eq_name = priv->eq_table.irq_names +
			MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;

		err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
				  mlx4_msi_x_interrupt, 0, eq_name,
				  priv->eq_table.eq + MLX4_EQ_ASYNC);
		if (err)
			goto err_out_unmap;

		priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->persist->pdev));
		err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_unmap;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	/* arm ASYNC eq */
	eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);

	return 0;

err_out_unmap:
	while (i > 0)
		mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
	mlx4_free_irqs(dev);

err_out_clr_int:
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

err_out_bitmap:
	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts
 * on the vector allocated for asynchronous events
 */
int mlx4_test_async(struct mlx4_dev *dev)
{
	return mlx4_NOP(dev);
}
EXPORT_SYMBOL(mlx4_test_async);

/* A test that verifies that we can accept interrupts
 * on the given irq vector of the tested port.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* Temporarily use polling for command completions */
	mlx4_cmd_use_polling(dev);

	/* Map the new eq to handle all asynchronous events */
	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
	if (err) {
		mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
		goto out;
	}

	/* Go back to using events */
	mlx4_cmd_use_events(dev);
	err = mlx4_NOP(dev);

	/* Return to default */
	mlx4_cmd_use_polling(dev);
out:
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	mlx4_cmd_use_events(dev);

	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupt);

bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
	    (vector == MLX4_EQ_ASYNC))
		return false;

	return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
}
EXPORT_SYMBOL(mlx4_is_eq_vector_valid);

u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned int i;
	unsigned int sum = 0;

	for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
		sum += !!test_bit(port - 1,
				  priv->eq_table.eq[i].actv_ports.ports);

	return sum;
}
EXPORT_SYMBOL(mlx4_get_eqs_per_port);

int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
		return -EINVAL;

	return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
				dev->caps.num_ports) > 1);
}
EXPORT_SYMBOL(mlx4_is_eq_shared);

struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
{
	return mlx4_priv(dev)->port[port].rmap;
}
EXPORT_SYMBOL(mlx4_get_cpu_rmap);

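/* Assign a completion vector for a CQ on the given port: use the requested
 * vector if it serves that port, otherwise pick the EQ with the lowest
 * reference count among those serving the port.  The IRQ itself is only
 * requested the first time a vector is assigned.
 */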
int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = 0, i = 0;
	u32 min_ref_count_val = (u32)-1;
	int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
	int *prequested_vector = NULL;

	mutex_lock(&priv->msix_ctl.pool_lock);
	if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
	    (requested_vector >= 0) &&
	    (requested_vector != MLX4_EQ_ASYNC)) {
		if (test_bit(port - 1,
			     priv->eq_table.eq[requested_vector].actv_ports.ports)) {
			prequested_vector = &requested_vector;
		} else {
			struct mlx4_eq *eq;

			for (i = 1; i < port;
			     requested_vector += mlx4_get_eqs_per_port(dev, i++))
				;

			eq = &priv->eq_table.eq[requested_vector];
			if (requested_vector < dev->caps.num_comp_vectors + 1 &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				prequested_vector = &requested_vector;
			}
		}
	}

	if (!prequested_vector) {
		requested_vector = -1;
		for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
		     i++) {
			struct mlx4_eq *eq = &priv->eq_table.eq[i];

			if (min_ref_count_val > eq->ref_count &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				min_ref_count_val = eq->ref_count;
				requested_vector = i;
			}
		}

		if (requested_vector < 0) {
			err = -ENOSPC;
			goto err_unlock;
		}

		prequested_vector = &requested_vector;
	}

	if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
	    dev->flags & MLX4_FLAG_MSI_X) {
		set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
		snprintf(priv->eq_table.irq_names +
			 *prequested_vector * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
			 *prequested_vector, dev_name(&dev->persist->pdev->dev));

		err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
				  mlx4_msi_x_interrupt, 0,
				  &priv->eq_table.irq_names[*prequested_vector << 5],
				  priv->eq_table.eq + *prequested_vector);

		if (err) {
			clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
			*prequested_vector = -1;
		} else {
#if defined(CONFIG_SMP)
			mlx4_set_eq_affinity_hint(priv, *prequested_vector);
#endif
			eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
			priv->eq_table.eq[*prequested_vector].have_irq = 1;
		}
	}

	if (!err && *prequested_vector >= 0)
		priv->eq_table.eq[*prequested_vector].ref_count++;

err_unlock:
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (!err && *prequested_vector >= 0)
		*vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
	else
		*vector = 0;

	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);

	mutex_lock(&priv->msix_ctl.pool_lock);
	priv->eq_table.eq[eq_vec].ref_count--;

	/* once we have allocated an EQ, we don't release it because it
	 * might be bound to the cpu_rmap.
	 */
	mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);