1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) |
2 | /* QLogic qed NIC Driver |
3 | * Copyright (c) 2015-2017 QLogic Corporation |
4 | * Copyright (c) 2019-2020 Marvell International Ltd. |
5 | */ |
6 | |
7 | #include <linux/types.h> |
8 | #include <asm/byteorder.h> |
9 | #include <linux/io.h> |
10 | #include <linux/delay.h> |
11 | #include <linux/dma-mapping.h> |
12 | #include <linux/errno.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/list.h> |
15 | #include <linux/pci.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/spinlock.h> |
18 | #include <linux/string.h> |
19 | #include "qed.h" |
20 | #include "qed_cxt.h" |
21 | #include "qed_dev_api.h" |
22 | #include "qed_hsi.h" |
23 | #include "qed_iro_hsi.h" |
24 | #include "qed_hw.h" |
25 | #include "qed_int.h" |
26 | #include "qed_iscsi.h" |
27 | #include "qed_mcp.h" |
28 | #include "qed_ooo.h" |
29 | #include "qed_reg_addr.h" |
30 | #include "qed_sp.h" |
31 | #include "qed_sriov.h" |
32 | #include "qed_rdma.h" |
33 | |
34 | /*************************************************************************** |
35 | * Structures & Definitions |
36 | ***************************************************************************/ |
37 | |
38 | #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) |
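/* Number of SPQ ring elements left unused when posting the pending list,
 * keeping room for a high-priority ramrod (see qed_spq_post_list()).
 */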
39 | |
40 | #define SPQ_BLOCK_DELAY_MAX_ITER (10) |
41 | #define SPQ_BLOCK_DELAY_US (10) |
42 | #define SPQ_BLOCK_SLEEP_MAX_ITER (1000) |
43 | #define SPQ_BLOCK_SLEEP_MS (5) |
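
/* Worst case, the quick poll busy-waits for ~100us (10 iterations x 10us)
 * and the sleeping poll waits for up to ~5s (1000 iterations x 5ms).
 */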
44 | |
45 | /*************************************************************************** |
46 | * Blocking Imp. (BLOCK/EBLOCK mode) |
47 | ***************************************************************************/ |
48 | static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, |
49 | void *cookie, |
50 | union event_ring_data *data, u8 fw_return_code) |
51 | { |
52 | struct qed_spq_comp_done *comp_done; |
53 | |
54 | comp_done = (struct qed_spq_comp_done *)cookie; |
55 | |
56 | comp_done->fw_return_code = fw_return_code; |
57 | |
58 | /* Make sure completion done is visible on waiting thread */ |
59 | smp_store_release(&comp_done->done, 0x1); |
60 | } |
61 | |
62 | static int __qed_spq_block(struct qed_hwfn *p_hwfn, |
63 | struct qed_spq_entry *p_ent, |
64 | u8 *p_fw_ret, bool sleep_between_iter) |
65 | { |
66 | struct qed_spq_comp_done *comp_done; |
67 | u32 iter_cnt; |
68 | |
69 | comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; |
70 | iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER |
71 | : SPQ_BLOCK_DELAY_MAX_ITER; |
72 | |
73 | while (iter_cnt--) { |
		/* Validate we received the completion update; the
		 * load-acquire pairs with the smp_store_release() in
		 * qed_spq_blocking_cb().
		 */
		if (smp_load_acquire(&comp_done->done) == 1) {
76 | if (p_fw_ret) |
77 | *p_fw_ret = comp_done->fw_return_code; |
78 | return 0; |
79 | } |
80 | |
81 | if (sleep_between_iter) |
82 | msleep(SPQ_BLOCK_SLEEP_MS); |
83 | else |
84 | udelay(SPQ_BLOCK_DELAY_US); |
85 | } |
86 | |
87 | return -EBUSY; |
88 | } |
89 | |
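/* Wait for a ramrod completion: a quick busy-wait poll first (unless
 * skip_quick_poll is set), then a sleeping poll. If the ramrod is still
 * stuck, request an MCP drain and poll once more; if that fails too,
 * report the failure via qed_hw_err_notify(QED_HW_ERR_RAMROD_FAIL).
 */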
90 | static int qed_spq_block(struct qed_hwfn *p_hwfn, |
91 | struct qed_spq_entry *p_ent, |
92 | u8 *p_fw_ret, bool skip_quick_poll) |
93 | { |
94 | struct qed_spq_comp_done *comp_done; |
95 | struct qed_ptt *p_ptt; |
96 | int rc; |
97 | |
98 | /* A relatively short polling period w/o sleeping, to allow the FW to |
99 | * complete the ramrod and thus possibly to avoid the following sleeps. |
100 | */ |
101 | if (!skip_quick_poll) { |
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
103 | if (!rc) |
104 | return 0; |
105 | } |
106 | |
107 | /* Move to polling with a sleeping period between iterations */ |
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
109 | if (!rc) |
110 | return 0; |
111 | |
112 | p_ptt = qed_ptt_acquire(p_hwfn); |
113 | if (!p_ptt) { |
114 | DP_NOTICE(p_hwfn, "ptt, failed to acquire\n" ); |
115 | return -EAGAIN; |
116 | } |
117 | |
	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
119 | rc = qed_mcp_drain(p_hwfn, p_ptt); |
120 | qed_ptt_release(p_hwfn, p_ptt); |
121 | if (rc) { |
122 | DP_NOTICE(p_hwfn, "MCP drain failed\n" ); |
123 | goto err; |
124 | } |
125 | |
126 | /* Retry after drain */ |
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
128 | if (!rc) |
129 | return 0; |
130 | |
131 | comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; |
132 | if (comp_done->done == 1) { |
133 | if (p_fw_ret) |
134 | *p_fw_ret = comp_done->fw_return_code; |
135 | return 0; |
136 | } |
137 | err: |
138 | p_ptt = qed_ptt_acquire(p_hwfn); |
139 | if (!p_ptt) |
140 | return -EBUSY; |
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
			  "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n",
			  le32_to_cpu(p_ent->elem.hdr.cid),
			  qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
						    p_ent->elem.hdr.cmd_id),
			  p_ent->elem.hdr.cmd_id,
			  qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
			  p_ent->elem.hdr.protocol_id,
			  le16_to_cpu(p_ent->elem.hdr.echo));
150 | qed_ptt_release(p_hwfn, p_ptt); |
151 | |
152 | return -EBUSY; |
153 | } |
154 | |
155 | /*************************************************************************** |
156 | * SPQ entries inner API |
157 | ***************************************************************************/ |
158 | static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, |
159 | struct qed_spq_entry *p_ent) |
160 | { |
161 | p_ent->flags = 0; |
162 | |
163 | switch (p_ent->comp_mode) { |
164 | case QED_SPQ_MODE_EBLOCK: |
165 | case QED_SPQ_MODE_BLOCK: |
166 | p_ent->comp_cb.function = qed_spq_blocking_cb; |
167 | break; |
168 | case QED_SPQ_MODE_CB: |
169 | break; |
170 | default: |
171 | DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n" , |
172 | p_ent->comp_mode); |
173 | return -EINVAL; |
174 | } |
175 | |
176 | DP_VERBOSE(p_hwfn, |
177 | QED_MSG_SPQ, |
178 | "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n" , |
179 | p_ent->elem.hdr.cid, |
180 | qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, |
181 | p_ent->elem.hdr.cmd_id), |
182 | p_ent->elem.hdr.cmd_id, |
183 | qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), |
184 | p_ent->elem.hdr.protocol_id, |
185 | p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo, |
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));
189 | |
190 | return 0; |
191 | } |
192 | |
193 | /*************************************************************************** |
194 | * HSI access |
195 | ***************************************************************************/ |
196 | static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, |
197 | struct qed_spq *p_spq) |
198 | { |
199 | struct core_conn_context *p_cxt; |
200 | struct qed_cxt_info cxt_info; |
201 | u16 physical_q; |
202 | int rc; |
203 | |
204 | cxt_info.iid = p_spq->cid; |
205 | |
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
211 | return; |
212 | } |
213 | |
214 | p_cxt = cxt_info.p_cxt; |
215 | |
216 | SET_FIELD(p_cxt->xstorm_ag_context.flags10, |
217 | XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); |
218 | SET_FIELD(p_cxt->xstorm_ag_context.flags1, |
219 | XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); |
220 | SET_FIELD(p_cxt->xstorm_ag_context.flags9, |
221 | XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); |
222 | |
223 | /* QM physical queue */ |
224 | physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); |
225 | p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q); |
226 | |
227 | p_cxt->xstorm_st_context.spq_base_addr.lo = |
228 | DMA_LO_LE(p_spq->chain.p_phys_addr); |
229 | p_cxt->xstorm_st_context.spq_base_addr.hi = |
230 | DMA_HI_LE(p_spq->chain.p_phys_addr); |
231 | } |
232 | |
233 | static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, |
234 | struct qed_spq *p_spq, struct qed_spq_entry *p_ent) |
235 | { |
236 | struct qed_chain *p_chain = &p_hwfn->p_spq->chain; |
237 | struct core_db_data *p_db_data = &p_spq->db_data; |
	u16 echo = qed_chain_get_prod_idx(p_chain);
239 | struct slow_path_element *elem; |
240 | |
241 | p_ent->elem.hdr.echo = cpu_to_le16(echo); |
242 | elem = qed_chain_produce(p_chain); |
243 | if (!elem) { |
244 | DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n" ); |
245 | return -EINVAL; |
246 | } |
247 | |
248 | *elem = p_ent->elem; /* struct assignment */ |
249 | |
250 | /* send a doorbell on the slow hwfn session */ |
251 | p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); |
252 | |
253 | /* make sure the SPQE is updated before the doorbell */ |
254 | wmb(); |
255 | |
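	/* struct core_db_data packs params, agg_flags and the 16-bit
	 * producer into 32 bits, hence the single u32 doorbell write
	 * (and DB_REC_WIDTH_32B at recovery registration time).
	 */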
256 | DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data); |
257 | |
	/* make sure the doorbell was rung */
259 | wmb(); |
260 | |
261 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, |
262 | "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n" , |
263 | p_spq->db_addr_offset, |
264 | p_spq->cid, |
265 | p_db_data->params, |
266 | p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain)); |
267 | |
268 | return 0; |
269 | } |
270 | |
271 | /*************************************************************************** |
272 | * Asynchronous events |
273 | ***************************************************************************/ |
274 | static int |
275 | qed_async_event_completion(struct qed_hwfn *p_hwfn, |
276 | struct event_ring_entry *p_eqe) |
277 | { |
278 | qed_spq_async_comp_cb cb; |
279 | |
280 | if (!p_hwfn->p_spq) |
281 | return -EINVAL; |
282 | |
283 | if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) { |
284 | DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n" , |
285 | qed_get_protocol_type_str(p_eqe->protocol_id), |
286 | p_eqe->protocol_id); |
287 | |
288 | return -EINVAL; |
289 | } |
290 | |
291 | cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id]; |
292 | if (cb) { |
293 | return cb(p_hwfn, p_eqe->opcode, p_eqe->echo, |
294 | &p_eqe->data, p_eqe->fw_return_code); |
295 | } else { |
296 | DP_NOTICE(p_hwfn, |
297 | "Unknown Async completion for %s:%d\n" , |
298 | qed_get_protocol_type_str(p_eqe->protocol_id), |
299 | p_eqe->protocol_id); |
300 | |
301 | return -EINVAL; |
302 | } |
303 | } |
304 | |
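/* Typical usage: a protocol module registers its handler at start of day
 * and unregisters it on teardown. An illustrative sketch (hypothetical
 * callback name; the real callers live in the protocol code, e.g.
 * qed_iscsi.c / qed_rdma.c):
 *
 *	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI, my_async_cb);
 *	...
 *	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
 */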
305 | int |
306 | qed_spq_register_async_cb(struct qed_hwfn *p_hwfn, |
307 | enum protocol_type protocol_id, |
308 | qed_spq_async_comp_cb cb) |
309 | { |
310 | if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) |
311 | return -EINVAL; |
312 | |
313 | p_hwfn->p_spq->async_comp_cb[protocol_id] = cb; |
314 | return 0; |
315 | } |
316 | |
317 | void |
318 | qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn, |
319 | enum protocol_type protocol_id) |
320 | { |
321 | if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) |
322 | return; |
323 | |
324 | p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL; |
325 | } |
326 | |
327 | /*************************************************************************** |
328 | * EQ API |
329 | ***************************************************************************/ |
330 | void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) |
331 | { |
332 | u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, |
333 | USTORM_EQE_CONS, p_hwfn->rel_pf_id); |
334 | |
335 | REG_WR16(p_hwfn, addr, prod); |
336 | } |
337 | |
338 | int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) |
339 | { |
340 | struct qed_eq *p_eq = cookie; |
341 | struct qed_chain *p_chain = &p_eq->chain; |
342 | int rc = 0; |
343 | |
344 | /* take a snapshot of the FW consumer */ |
345 | u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons); |
346 | |
347 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n" , fw_cons_idx); |
348 | |
	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
355 | |
356 | /* Complete current segment of eq entries */ |
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
358 | struct event_ring_entry *p_eqe = qed_chain_consume(p_chain); |
359 | |
360 | if (!p_eqe) { |
361 | rc = -EINVAL; |
362 | break; |
363 | } |
364 | |
365 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, |
366 | "op %x prot %x res0 %x echo %x fwret %x flags %x\n" , |
367 | p_eqe->opcode, |
368 | p_eqe->protocol_id, |
369 | p_eqe->reserved0, |
370 | le16_to_cpu(p_eqe->echo), |
371 | p_eqe->fw_return_code, |
372 | p_eqe->flags); |
373 | |
374 | if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) { |
375 | if (qed_async_event_completion(p_hwfn, p_eqe)) |
376 | rc = -EINVAL; |
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
381 | rc = -EINVAL; |
382 | } |
383 | |
384 | qed_chain_recycle_consumed(p_chain); |
385 | } |
386 | |
	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
388 | |
389 | /* Attempt to post pending requests */ |
	spin_lock_bh(&p_hwfn->p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
393 | |
394 | return rc; |
395 | } |
396 | |
397 | int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) |
398 | { |
399 | struct qed_chain_init_params params = { |
400 | .mode = QED_CHAIN_MODE_PBL, |
401 | .intended_use = QED_CHAIN_USE_TO_PRODUCE, |
402 | .cnt_type = QED_CHAIN_CNT_TYPE_U16, |
403 | .num_elems = num_elem, |
404 | .elem_size = sizeof(union event_ring_element), |
405 | }; |
406 | struct qed_eq *p_eq; |
407 | int ret; |
408 | |
409 | /* Allocate EQ struct */ |
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
411 | if (!p_eq) |
412 | return -ENOMEM; |
413 | |
	ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
417 | goto eq_allocate_fail; |
418 | } |
419 | |
420 | /* register EQ completion on the SP SB */ |
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
423 | |
424 | p_hwfn->p_eq = p_eq; |
425 | return 0; |
426 | |
427 | eq_allocate_fail: |
	kfree(p_eq);
429 | |
430 | return ret; |
431 | } |
432 | |
433 | void qed_eq_setup(struct qed_hwfn *p_hwfn) |
434 | { |
	qed_chain_reset(&p_hwfn->p_eq->chain);
436 | } |
437 | |
438 | void qed_eq_free(struct qed_hwfn *p_hwfn) |
439 | { |
440 | if (!p_hwfn->p_eq) |
441 | return; |
442 | |
	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
446 | p_hwfn->p_eq = NULL; |
447 | } |
448 | |
449 | /*************************************************************************** |
450 | * CQE API - manipulate EQ functionality |
451 | ***************************************************************************/ |
452 | static int qed_cqe_completion(struct qed_hwfn *p_hwfn, |
453 | struct eth_slow_path_rx_cqe *cqe, |
454 | enum protocol_type protocol) |
455 | { |
456 | if (IS_VF(p_hwfn->cdev)) |
457 | return 0; |
458 | |
459 | /* @@@tmp - it's possible we'll eventually want to handle some |
460 | * actual commands that can arrive here, but for now this is only |
461 | * used to complete the ramrod using the echo value on the cqe |
462 | */ |
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
464 | } |
465 | |
466 | int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, |
467 | struct eth_slow_path_rx_cqe *cqe) |
468 | { |
469 | int rc; |
470 | |
	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
472 | if (rc) |
473 | DP_NOTICE(p_hwfn, |
474 | "Failed to handle RXQ CQE [cmd 0x%02x]\n" , |
475 | cqe->ramrod_cmd_id); |
476 | |
477 | return rc; |
478 | } |
479 | |
480 | /*************************************************************************** |
481 | * Slow hwfn Queue (spq) |
482 | ***************************************************************************/ |
483 | void qed_spq_setup(struct qed_hwfn *p_hwfn) |
484 | { |
485 | struct qed_spq *p_spq = p_hwfn->p_spq; |
486 | struct qed_spq_entry *p_virt = NULL; |
487 | struct core_db_data *p_db_data; |
488 | void __iomem *db_addr; |
489 | dma_addr_t p_phys = 0; |
490 | u32 i, capacity; |
491 | int rc; |
492 | |
	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
497 | spin_lock_init(&p_spq->lock); |
498 | |
499 | /* SPQ empty pool */ |
500 | p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod); |
501 | p_virt = p_spq->p_virt; |
502 | |
	capacity = qed_chain_get_capacity(&p_spq->chain);
504 | for (i = 0; i < capacity; i++) { |
505 | DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys); |
506 | |
		list_add_tail(&p_virt->list, &p_spq->free_pool);
508 | |
509 | p_virt++; |
510 | p_phys += sizeof(struct qed_spq_entry); |
511 | } |
512 | |
513 | /* Statistics */ |
514 | p_spq->normal_count = 0; |
515 | p_spq->comp_count = 0; |
516 | p_spq->comp_sent_count = 0; |
517 | p_spq->unlimited_pending_count = 0; |
518 | |
	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
520 | p_spq->comp_bitmap_idx = 0; |
521 | |
522 | /* SPQ cid, cannot fail */ |
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
524 | qed_spq_hw_initialize(p_hwfn, p_spq); |
525 | |
526 | /* reset the chain itself */ |
	qed_chain_reset(&p_spq->chain);
528 | |
529 | /* Initialize the address/data of the SPQ doorbell */ |
	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
531 | p_db_data = &p_spq->db_data; |
532 | memset(p_db_data, 0, sizeof(*p_db_data)); |
533 | SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM); |
534 | SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX); |
535 | SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL, |
536 | DQ_XCM_CORE_SPQ_PROD_CMD); |
537 | p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD; |
538 | |
539 | /* Register the SPQ doorbell with the doorbell recovery mechanism */ |
540 | db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + |
541 | p_spq->db_addr_offset); |
	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
544 | if (rc) |
545 | DP_INFO(p_hwfn, |
546 | "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n" ); |
547 | } |
548 | |
549 | int qed_spq_alloc(struct qed_hwfn *p_hwfn) |
550 | { |
551 | struct qed_chain_init_params params = { |
552 | .mode = QED_CHAIN_MODE_SINGLE, |
553 | .intended_use = QED_CHAIN_USE_TO_PRODUCE, |
554 | .cnt_type = QED_CHAIN_CNT_TYPE_U16, |
555 | .elem_size = sizeof(struct slow_path_element), |
556 | }; |
557 | struct qed_dev *cdev = p_hwfn->cdev; |
558 | struct qed_spq_entry *p_virt = NULL; |
559 | struct qed_spq *p_spq = NULL; |
560 | dma_addr_t p_phys = 0; |
561 | u32 capacity; |
562 | int ret; |
563 | |
564 | /* SPQ struct */ |
	p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
566 | if (!p_spq) |
567 | return -ENOMEM; |
568 | |
569 | /* SPQ ring */ |
	ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
573 | goto spq_chain_alloc_fail; |
574 | } |
575 | |
576 | /* allocate and fill the SPQ elements (incl. ramrod data list) */ |
	capacity = qed_chain_get_capacity(&p_spq->chain);
578 | ret = -ENOMEM; |
579 | |
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
583 | if (!p_virt) |
584 | goto spq_alloc_fail; |
585 | |
586 | p_spq->p_virt = p_virt; |
587 | p_spq->p_phys = p_phys; |
588 | p_hwfn->p_spq = p_spq; |
589 | |
590 | return 0; |
591 | |
592 | spq_alloc_fail: |
	qed_chain_free(cdev, &p_spq->chain);
594 | spq_chain_alloc_fail: |
	kfree(p_spq);
596 | |
597 | return ret; |
598 | } |
599 | |
600 | void qed_spq_free(struct qed_hwfn *p_hwfn) |
601 | { |
602 | struct qed_spq *p_spq = p_hwfn->p_spq; |
603 | void __iomem *db_addr; |
604 | u32 capacity; |
605 | |
606 | if (!p_spq) |
607 | return; |
608 | |
609 | /* Delete the SPQ doorbell from the doorbell recovery mechanism */ |
610 | db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + |
611 | p_spq->db_addr_offset); |
	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);
613 | |
614 | if (p_spq->p_virt) { |
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
620 | } |
621 | |
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
624 | p_hwfn->p_spq = NULL; |
625 | } |
626 | |
627 | int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent) |
628 | { |
629 | struct qed_spq *p_spq = p_hwfn->p_spq; |
630 | struct qed_spq_entry *p_ent = NULL; |
631 | int rc = 0; |
632 | |
	spin_lock_bh(&p_spq->lock);
634 | |
	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
640 | rc = -ENOMEM; |
641 | goto out_unlock; |
642 | } |
643 | p_ent->queue = &p_spq->unlimited_pending; |
644 | } else { |
645 | p_ent = list_first_entry(&p_spq->free_pool, |
646 | struct qed_spq_entry, list); |
		list_del(&p_ent->list);
648 | p_ent->queue = &p_spq->pending; |
649 | } |
650 | |
651 | *pp_ent = p_ent; |
652 | |
653 | out_unlock: |
	spin_unlock_bh(&p_spq->lock);
655 | return rc; |
656 | } |
657 | |
658 | /* Locked variant; Should be called while the SPQ lock is taken */ |
659 | static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn, |
660 | struct qed_spq_entry *p_ent) |
661 | { |
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
663 | } |
664 | |
665 | void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) |
666 | { |
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
670 | } |
671 | |
672 | /** |
673 | * qed_spq_add_entry() - Add a new entry to the pending list. |
674 | * Should be used while lock is being held. |
675 | * |
676 | * @p_hwfn: HW device data. |
677 | * @p_ent: An entry to add. |
678 | * @priority: Desired priority. |
679 | * |
 * Adds an entry to the pending list if there is room (an empty
681 | * element is available in the free_pool), or else places the |
682 | * entry in the unlimited_pending pool. |
683 | * |
684 | * Return: zero on success, -EINVAL on invalid @priority. |
685 | */ |
686 | static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, |
687 | struct qed_spq_entry *p_ent, |
688 | enum spq_priority priority) |
689 | { |
690 | struct qed_spq *p_spq = p_hwfn->p_spq; |
691 | |
692 | if (p_ent->queue == &p_spq->unlimited_pending) { |
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
695 | p_spq->unlimited_pending_count++; |
696 | |
697 | return 0; |
698 | } else { |
699 | struct qed_spq_entry *p_en2; |
700 | |
701 | p_en2 = list_first_entry(&p_spq->free_pool, |
702 | struct qed_spq_entry, list); |
			list_del(&p_en2->list);
704 | |
705 | /* Copy the ring element physical pointer to the new |
706 | * entry, since we are about to override the entire ring |
707 | * entry and don't want to lose the pointer. |
708 | */ |
709 | p_ent->elem.data_ptr = p_en2->elem.data_ptr; |
710 | |
711 | *p_en2 = *p_ent; |
712 | |
			/* EBLOCK is responsible for freeing the allocated
			 * p_ent.
			 */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
716 | else |
717 | p_ent->post_ent = p_en2; |
718 | |
719 | p_ent = p_en2; |
720 | } |
721 | } |
722 | |
723 | /* entry is to be placed in 'pending' queue */ |
724 | switch (priority) { |
725 | case QED_SPQ_PRIORITY_NORMAL: |
		list_add_tail(&p_ent->list, &p_spq->pending);
727 | p_spq->normal_count++; |
728 | break; |
729 | case QED_SPQ_PRIORITY_HIGH: |
		list_add(&p_ent->list, &p_spq->pending);
731 | p_spq->high_count++; |
732 | break; |
733 | default: |
734 | return -EINVAL; |
735 | } |
736 | |
737 | return 0; |
738 | } |
739 | |
740 | /*************************************************************************** |
741 | * Accessor |
742 | ***************************************************************************/ |
743 | u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) |
744 | { |
745 | if (!p_hwfn->p_spq) |
746 | return 0xffffffff; /* illegal */ |
747 | return p_hwfn->p_spq->cid; |
748 | } |
749 | |
750 | /*************************************************************************** |
751 | * Posting new Ramrods |
752 | ***************************************************************************/ |
753 | static int qed_spq_post_list(struct qed_hwfn *p_hwfn, |
754 | struct list_head *head, u32 keep_reserve) |
755 | { |
756 | struct qed_spq *p_spq = p_hwfn->p_spq; |
757 | int rc; |
758 | |
	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
760 | !list_empty(head)) { |
761 | struct qed_spq_entry *p_ent = |
762 | list_first_entry(head, struct qed_spq_entry, list); |
		list_move_tail(&p_ent->list, &p_spq->completion_pending);
764 | p_spq->comp_sent_count++; |
765 | |
766 | rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent); |
767 | if (rc) { |
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
770 | return rc; |
771 | } |
772 | } |
773 | |
774 | return 0; |
775 | } |
776 | |
777 | int qed_spq_pend_post(struct qed_hwfn *p_hwfn) |
778 | { |
779 | struct qed_spq *p_spq = p_hwfn->p_spq; |
780 | struct qed_spq_entry *p_ent = NULL; |
781 | |
	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
784 | break; |
785 | |
786 | p_ent = list_first_entry(&p_spq->unlimited_pending, |
787 | struct qed_spq_entry, list); |
788 | if (!p_ent) |
789 | return -EINVAL; |
790 | |
		list_del(&p_ent->list);
792 | |
		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
794 | } |
795 | |
	return qed_spq_post_list(p_hwfn, &p_spq->pending,
797 | SPQ_HIGH_PRI_RESERVE_DEFAULT); |
798 | } |
799 | |
800 | static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent, |
801 | u8 *fw_return_code) |
802 | { |
803 | if (!fw_return_code) |
804 | return; |
805 | |
806 | if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE || |
807 | p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP) |
808 | *fw_return_code = RDMA_RETURN_OK; |
809 | } |
810 | |
811 | /* Avoid overriding of SPQ entries when getting out-of-order completions, by |
812 | * marking the completions in a bitmap and increasing the chain consumer only |
813 | * for the first successive completed entries. |
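 * For example, if the completion for echo 2 arrives before echo 1, bit 2
 * is set but comp_bitmap_idx still points at bit 1, so nothing is
 * returned yet; once echo 1 completes, both bits are cleared in turn and
 * qed_chain_return_produced() is called twice.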
814 | */ |
815 | static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo) |
816 | { |
817 | u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; |
818 | struct qed_spq *p_spq = p_hwfn->p_spq; |
819 | |
820 | __set_bit(pos, p_spq->p_comp_bitmap); |
821 | while (test_bit(p_spq->comp_bitmap_idx, |
822 | p_spq->p_comp_bitmap)) { |
823 | __clear_bit(p_spq->comp_bitmap_idx, |
824 | p_spq->p_comp_bitmap); |
825 | p_spq->comp_bitmap_idx++; |
		qed_chain_return_produced(&p_spq->chain);
827 | } |
828 | } |
829 | |
830 | int qed_spq_post(struct qed_hwfn *p_hwfn, |
831 | struct qed_spq_entry *p_ent, u8 *fw_return_code) |
832 | { |
833 | int rc = 0; |
834 | struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; |
835 | bool b_ret_ent = true; |
836 | bool eblock; |
837 | |
838 | if (!p_hwfn) |
839 | return -EINVAL; |
840 | |
841 | if (!p_ent) { |
842 | DP_NOTICE(p_hwfn, "Got a NULL pointer\n" ); |
843 | return -EINVAL; |
844 | } |
845 | |
846 | if (p_hwfn->cdev->recov_in_prog) { |
847 | DP_VERBOSE(p_hwfn, |
848 | QED_MSG_SPQ, |
849 | "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n" , |
850 | qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, |
851 | p_ent->elem.hdr.cmd_id), |
852 | p_ent->elem.hdr.cmd_id, |
853 | qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), |
854 | p_ent->elem.hdr.protocol_id); |
855 | |
856 | /* Let the flow complete w/o any error handling */ |
857 | qed_spq_recov_set_ret_code(p_ent, fw_return_code); |
858 | return 0; |
859 | } |
860 | |
861 | /* Complete the entry */ |
862 | rc = qed_spq_fill_entry(p_hwfn, p_ent); |
863 | |
	spin_lock_bh(&p_spq->lock);
865 | |
866 | /* Check return value after LOCK is taken for cleaner error flow */ |
867 | if (rc) |
868 | goto spq_post_fail; |
869 | |
870 | /* Check if entry is in block mode before qed_spq_add_entry, |
871 | * which might kfree p_ent. |
872 | */ |
873 | eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK); |
874 | |
875 | /* Add the request to the pending queue */ |
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
877 | if (rc) |
878 | goto spq_post_fail; |
879 | |
880 | rc = qed_spq_pend_post(p_hwfn); |
881 | if (rc) { |
882 | /* Since it's possible that pending failed for a different |
883 | * entry [although unlikely], the failed entry was already |
884 | * dealt with; No need to return it here. |
885 | */ |
886 | b_ret_ent = false; |
887 | goto spq_post_fail; |
888 | } |
889 | |
	spin_unlock_bh(&p_spq->lock);
891 | |
892 | if (eblock) { |
893 | /* For entries in QED BLOCK mode, the completion code cannot |
894 | * perform the necessary cleanup - if it did, we couldn't |
895 | * access p_ent here to see whether it's successful or not. |
896 | * Thus, after gaining the answer perform the cleanup here. |
897 | */ |
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);
900 | |
901 | if (p_ent->queue == &p_spq->unlimited_pending) { |
902 | struct qed_spq_entry *p_post_ent = p_ent->post_ent; |
903 | |
			kfree(p_ent);
905 | |
906 | /* Return the entry which was actually posted */ |
907 | p_ent = p_post_ent; |
908 | } |
909 | |
910 | if (rc) |
911 | goto spq_post_fail2; |
912 | |
913 | /* return to pool */ |
914 | qed_spq_return_entry(p_hwfn, p_ent); |
915 | } |
916 | return rc; |
917 | |
918 | spq_post_fail2: |
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
922 | |
923 | spq_post_fail: |
924 | /* return to the free pool */ |
925 | if (b_ret_ent) |
926 | __qed_spq_return_entry(p_hwfn, p_ent); |
	spin_unlock_bh(&p_spq->lock);
928 | |
929 | return rc; |
930 | } |
931 | |
932 | int qed_spq_completion(struct qed_hwfn *p_hwfn, |
933 | __le16 echo, |
934 | u8 fw_return_code, |
935 | union event_ring_data *p_data) |
936 | { |
937 | struct qed_spq *p_spq; |
938 | struct qed_spq_entry *p_ent = NULL; |
939 | struct qed_spq_entry *tmp; |
940 | struct qed_spq_entry *found = NULL; |
941 | |
942 | if (!p_hwfn) |
943 | return -EINVAL; |
944 | |
945 | p_spq = p_hwfn->p_spq; |
946 | if (!p_spq) |
947 | return -EINVAL; |
948 | |
	spin_lock_bh(&p_spq->lock);
950 | list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { |
951 | if (p_ent->elem.hdr.echo == echo) { |
			list_del(&p_ent->list);
953 | qed_spq_comp_bmap_update(p_hwfn, echo); |
954 | p_spq->comp_count++; |
955 | found = p_ent; |
956 | break; |
957 | } |
958 | |
		/* This is relatively uncommon - depends on scenarios
		 * which have multiple ramrods sent per PF.
		 */
962 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, |
963 | "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n" , |
964 | le16_to_cpu(echo), |
965 | le16_to_cpu(p_ent->elem.hdr.echo)); |
966 | } |
967 | |
968 | /* Release lock before callback, as callback may post |
969 | * an additional ramrod. |
970 | */ |
	spin_unlock_bh(&p_spq->lock);
972 | |
973 | if (!found) { |
974 | DP_NOTICE(p_hwfn, |
975 | "Failed to find an entry this EQE [echo %04x] completes\n" , |
976 | le16_to_cpu(echo)); |
977 | return -EEXIST; |
978 | } |
979 | |
980 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, |
981 | "Complete EQE [echo %04x]: func %p cookie %p)\n" , |
982 | le16_to_cpu(echo), |
983 | p_ent->comp_cb.function, p_ent->comp_cb.cookie); |
984 | if (found->comp_cb.function) |
985 | found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, |
986 | fw_return_code); |
987 | else |
988 | DP_VERBOSE(p_hwfn, |
989 | QED_MSG_SPQ, |
990 | "Got a completion without a callback function\n" ); |
991 | |
992 | if (found->comp_mode != QED_SPQ_MODE_EBLOCK) |
993 | /* EBLOCK is responsible for returning its own entry into the |
994 | * free list. |
995 | */ |
		qed_spq_return_entry(p_hwfn, found);
997 | |
998 | return 0; |
999 | } |
1000 | |
1001 | #define QED_SPQ_CONSQ_ELEM_SIZE 0x80 |
1002 | |
1003 | int qed_consq_alloc(struct qed_hwfn *p_hwfn) |
1004 | { |
1005 | struct qed_chain_init_params params = { |
1006 | .mode = QED_CHAIN_MODE_PBL, |
1007 | .intended_use = QED_CHAIN_USE_TO_PRODUCE, |
1008 | .cnt_type = QED_CHAIN_CNT_TYPE_U16, |
1009 | .num_elems = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE, |
1010 | .elem_size = QED_SPQ_CONSQ_ELEM_SIZE, |
1011 | }; |
1012 | struct qed_consq *p_consq; |
1013 | int ret; |
1014 | |
1015 | /* Allocate ConsQ struct */ |
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
1017 | if (!p_consq) |
1018 | return -ENOMEM; |
1019 | |
1020 | /* Allocate and initialize ConsQ chain */ |
	ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain\n");
1024 | goto consq_alloc_fail; |
1025 | } |
1026 | |
1027 | p_hwfn->p_consq = p_consq; |
1028 | |
1029 | return 0; |
1030 | |
1031 | consq_alloc_fail: |
	kfree(p_consq);
1033 | |
1034 | return ret; |
1035 | } |
1036 | |
1037 | void qed_consq_setup(struct qed_hwfn *p_hwfn) |
1038 | { |
	qed_chain_reset(&p_hwfn->p_consq->chain);
1040 | } |
1041 | |
1042 | void qed_consq_free(struct qed_hwfn *p_hwfn) |
1043 | { |
1044 | if (!p_hwfn->p_consq) |
1045 | return; |
1046 | |
	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
1050 | p_hwfn->p_consq = NULL; |
1051 | } |
1052 | |