1/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/delay.h>
36#include <linux/jiffies.h>
37#include <linux/string.h>
38#include <scsi/scsi_device.h>
39#include <scsi/scsi_transport_fc.h>
40
41#include "csio_hw.h"
42#include "csio_lnode.h"
43#include "csio_rnode.h"
44#include "csio_mb.h"
45#include "csio_wr.h"
46
47#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL)
48
49/* MB Command/Response Helpers */
50/*
51 * csio_mb_fw_retval - FW return value from a mailbox response.
52 * @mbp: Mailbox structure
53 *
54 */
55enum fw_retval
56csio_mb_fw_retval(struct csio_mb *mbp)
57{
58 struct fw_cmd_hdr *hdr;
59
60 hdr = (struct fw_cmd_hdr *)(mbp->mb);
61
62 return FW_CMD_RETVAL_G(ntohl(hdr->lo));
63}
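
/*
 * Example (editorial sketch, not part of the driver): a typical immediate
 * (polling) mailbox sequence checks csio_mb_fw_retval() once csio_mb_issue()
 * returns. The mailbox is caller-allocated (e.g. from the driver's mailbox
 * mempool); names prefixed with "example_" are hypothetical.
 */
#if 0
static int example_issue_and_check(struct csio_hw *hw, struct csio_mb *mbp)
{
	int rv;

	/*
	 * mbp must already be initialized by one of the csio_mb_*() helpers
	 * with a NULL cbfn, which selects immediate (polling) mode.
	 */
	rv = csio_mb_issue(hw, mbp);
	if (rv)
		return rv;		/* issue failed or timed out */

	if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
		return -EINVAL;		/* firmware rejected the command */

	return 0;
}
#endif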
64
/*
 * csio_mb_hello - FW HELLO command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout (in ms).
 * @m_mbox: Master mailbox number, if any.
 * @a_mbox: Mailbox number for async notifications.
 * @master: Device mastership.
 * @cbfn: Callback, if any.
 *
 */
75void
76csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
77 uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
78 void (*cbfn) (struct csio_hw *, struct csio_mb *))
79{
80 struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
81
82 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
83
84 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_HELLO_CMD) |
85 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
86 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
87 cmdp->err_to_clearinit = htonl(
88 FW_HELLO_CMD_MASTERDIS_V(master == CSIO_MASTER_CANT) |
89 FW_HELLO_CMD_MASTERFORCE_V(master == CSIO_MASTER_MUST) |
90 FW_HELLO_CMD_MBMASTER_V(master == CSIO_MASTER_MUST ?
91 m_mbox : FW_HELLO_CMD_MBMASTER_M) |
92 FW_HELLO_CMD_MBASYNCNOT_V(a_mbox) |
93 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
94 FW_HELLO_CMD_CLEARINIT_F);
95
96}
97
/*
 * csio_mb_process_hello_rsp - FW HELLO response processing helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @retval: Firmware return value from the mailbox response
 * @state: Device state deduced from the response.
 * @mpfn: Master PF number returned by firmware.
 *
 */
107void
108csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
109 enum fw_retval *retval, enum csio_dev_state *state,
110 uint8_t *mpfn)
111{
112 struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
113 uint32_t value;
114
115 *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
116
117 if (*retval == FW_SUCCESS) {
118 hw->fwrev = ntohl(rsp->fwrev);
119
120 value = ntohl(rsp->err_to_clearinit);
121 *mpfn = FW_HELLO_CMD_MBMASTER_G(value);
122
123 if (value & FW_HELLO_CMD_INIT_F)
124 *state = CSIO_DEV_STATE_INIT;
125 else if (value & FW_HELLO_CMD_ERR_F)
126 *state = CSIO_DEV_STATE_ERR;
127 else
128 *state = CSIO_DEV_STATE_UNINIT;
129 }
130}
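
/*
 * Example (editorial sketch, not part of the driver): how the two HELLO
 * helpers above pair up around a polled csio_mb_issue() call. The mailbox is
 * caller-allocated; the 5000 ms timeout and the "example_" names are
 * illustrative assumptions, not driver API.
 */
#if 0
static int example_do_hello(struct csio_hw *hw, struct csio_mb *mbp)
{
	enum fw_retval retval;
	enum csio_dev_state state;
	uint8_t mpfn;

	/* Force mastership, using our own PF number for both mailboxes. */
	csio_mb_hello(hw, mbp, 5000 /* ms */, hw->pfn, hw->pfn,
		      CSIO_MASTER_MUST, NULL);

	if (csio_mb_issue(hw, mbp))
		return -EIO;

	csio_mb_process_hello_rsp(hw, mbp, &retval, &state, &mpfn);
	if (retval != FW_SUCCESS)
		return -EINVAL;

	csio_dbg(hw, "HELLO: master pfn %u, device state %d\n", mpfn, state);
	return 0;
}
#endif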
131
/*
 * csio_mb_bye - FW BYE command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout (in ms).
 * @cbfn: Callback, if any.
 *
 */
139void
140csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
141 void (*cbfn) (struct csio_hw *, struct csio_mb *))
142{
143 struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);
144
145 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
146
147 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) |
148 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
149 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
150
151}
152
/*
 * csio_mb_reset - FW RESET command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout (in ms).
 * @reset: Type of reset.
 * @halt: Firmware halt flag.
 * @cbfn: Callback, if any.
 *
 */
161void
162csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
163 int reset, int halt,
164 void (*cbfn) (struct csio_hw *, struct csio_mb *))
165{
166 struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
167
168 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
169
170 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) |
171 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
172 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
173 cmdp->val = htonl(reset);
174 cmdp->halt_pkd = htonl(halt);
175
176}
177
178/*
179 * csio_mb_params - FW PARAMS command helper
180 * @hw: The HW structure
181 * @mbp: Mailbox structure
182 * @tmo: Command timeout.
183 * @pf: PF number.
184 * @vf: VF number.
185 * @nparams: Number of parameters
186 * @params: Parameter mnemonic array.
187 * @val: Parameter value array.
188 * @wr: Write/Read PARAMS.
189 * @cbfn: Callback, if any.
190 *
191 */
192void
193csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
194 unsigned int pf, unsigned int vf, unsigned int nparams,
195 const u32 *params, u32 *val, bool wr,
196 void (*cbfn)(struct csio_hw *, struct csio_mb *))
197{
198 uint32_t i;
199 uint32_t temp_params = 0, temp_val = 0;
200 struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
201 __be32 *p = &cmdp->param[0].mnem;
202
203 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
204
205 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) |
206 FW_CMD_REQUEST_F |
207 (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F) |
208 FW_PARAMS_CMD_PFN_V(pf) |
209 FW_PARAMS_CMD_VFN_V(vf));
210 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
211
212 /* Write Params */
213 if (wr) {
214 while (nparams--) {
215 temp_params = *params++;
216 temp_val = *val++;
217
218 *p++ = htonl(temp_params);
219 *p++ = htonl(temp_val);
220 }
221 } else {
222 for (i = 0; i < nparams; i++, p += 2) {
223 temp_params = *params++;
224 *p = htonl(temp_params);
225 }
226 }
227
228}
229
230/*
231 * csio_mb_process_read_params_rsp - FW PARAMS response processing helper
232 * @hw: The HW structure
233 * @mbp: Mailbox structure
234 * @retval: Mailbox return value from Firmware
235 * @nparams: Number of parameters
236 * @val: Parameter value array.
237 *
238 */
239void
240csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
241 enum fw_retval *retval, unsigned int nparams,
242 u32 *val)
243{
244 struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
245 uint32_t i;
246 __be32 *p = &rsp->param[0].val;
247
248 *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
249
250 if (*retval == FW_SUCCESS)
251 for (i = 0; i < nparams; i++, p += 2)
252 *val++ = ntohl(*p);
253}
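
/*
 * Example (editorial sketch, not part of the driver): reading firmware
 * parameters with csio_mb_params(wr = false) and decoding the reply with
 * csio_mb_process_read_params_rsp(). The PORTVEC mnemonic and the 5000 ms
 * timeout are only illustrations; any FW_PARAMS_* mnemonic can be queried
 * the same way.
 */
#if 0
static int example_read_portvec(struct csio_hw *hw, struct csio_mb *mbp,
				u32 *portvec)
{
	enum fw_retval retval;
	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);

	/* val is unused for reads, so NULL is fine here. */
	csio_mb_params(hw, mbp, 5000 /* ms */, hw->pfn, 0 /* vf */,
		       1, &param, NULL, false, NULL);

	if (csio_mb_issue(hw, mbp))
		return -EIO;

	csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, portvec);
	return (retval == FW_SUCCESS) ? 0 : -EINVAL;
}
#endif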
254
255/*
256 * csio_mb_ldst - FW LDST command
257 * @hw: The HW structure
258 * @mbp: Mailbox structure
259 * @tmo: timeout
260 * @reg: register
261 *
262 */
263void
264csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
265{
266 struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
267 CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);
268
269 /*
270 * Construct and send the Firmware LDST Command to retrieve the
271 * specified PCI-E Configuration Space register.
272 */
273 ldst_cmd->op_to_addrspace =
274 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
275 FW_CMD_REQUEST_F |
276 FW_CMD_READ_F |
277 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
278 ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
279 ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
280 ldst_cmd->u.pcie.ctrl_to_fn =
281 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn));
282 ldst_cmd->u.pcie.r = (uint8_t)reg;
283}
284
285/*
286 *
287 * csio_mb_caps_config - FW Read/Write Capabilities command helper
288 * @hw: The HW structure
289 * @mbp: Mailbox structure
290 * @wr: Write if 1, Read if 0
291 * @init: Turn on initiator mode.
292 * @tgt: Turn on target mode.
293 * @cofld: If 1, Control Offload for FCoE
294 * @cbfn: Callback, if any.
295 *
296 * This helper assumes that cmdp has MB payload from a previous CAPS
297 * read command.
298 */
299void
300csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
301 bool wr, bool init, bool tgt, bool cofld,
302 void (*cbfn) (struct csio_hw *, struct csio_mb *))
303{
304 struct fw_caps_config_cmd *cmdp =
305 (struct fw_caps_config_cmd *)(mbp->mb);
306
307 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
308
309 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
310 FW_CMD_REQUEST_F |
311 (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F));
312 cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
313
314 /* Read config */
315 if (!wr)
316 return;
317
318 /* Write config */
319 cmdp->fcoecaps = 0;
320
321 if (cofld)
322 cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
323 if (init)
324 cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
325 if (tgt)
326 cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
327}
328
/*
 * csio_mb_port - FW PORT command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout
 * @portid: Port ID to get/set info
 * @wr: Write/Read PORT information.
 * @fc: Port capabilities to request (32-bit format), including flow control.
 * @fw_caps: Firmware capability format in use (FW_CAPS16 selects the
 *           16-bit format).
 * @cbfn: Callback, if any.
 *
 */
341void
342csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
343 u8 portid, bool wr, uint32_t fc, uint16_t fw_caps,
344 void (*cbfn) (struct csio_hw *, struct csio_mb *))
345{
346 struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
347
348 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
349
350 cmdp->op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
351 FW_CMD_REQUEST_F |
352 (wr ? FW_CMD_EXEC_F : FW_CMD_READ_F) |
353 FW_PORT_CMD_PORTID_V(portid));
354 if (!wr) {
355 cmdp->action_to_len16 = htonl(
356 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
357 ? FW_PORT_ACTION_GET_PORT_INFO
358 : FW_PORT_ACTION_GET_PORT_INFO32) |
359 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
360 return;
361 }
362
363 /* Set port */
364 cmdp->action_to_len16 = htonl(
365 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
366 ? FW_PORT_ACTION_L1_CFG
367 : FW_PORT_ACTION_L1_CFG32) |
368 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
369
370 if (fw_caps == FW_CAPS16)
371 cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc));
372 else
373 cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
374}
375
/*
 * csio_mb_process_read_port_rsp - FW PORT command response processing helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @retval: Mailbox return value from Firmware
 * @fw_caps: Firmware capability format in use (FW_CAPS16 selects the
 *           16-bit format).
 * @pcaps: Returned port capabilities.
 * @acaps: Returned advertised capabilities.
 *
 */
384void
385csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
386 enum fw_retval *retval, uint16_t fw_caps,
387 u32 *pcaps, u32 *acaps)
388{
389 struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
390
391 *retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16));
392
393 if (*retval == FW_SUCCESS) {
394 if (fw_caps == FW_CAPS16) {
395 *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
396 *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
397 } else {
398 *pcaps = be32_to_cpu(rsp->u.info32.pcaps32);
399 *acaps = be32_to_cpu(rsp->u.info32.acaps32);
400 }
401 }
402}
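
/*
 * Example (editorial sketch, not part of the driver): a polled PORT read
 * using the two helpers above. fw_caps is whichever capability format the
 * firmware negotiated (FW_CAPS16 for older firmware); "example_" names and
 * the timeout are hypothetical.
 */
#if 0
static int example_read_port_caps(struct csio_hw *hw, struct csio_mb *mbp,
				  u8 portid, uint16_t fw_caps,
				  u32 *pcaps, u32 *acaps)
{
	enum fw_retval retval;

	/* wr = false selects GET_PORT_INFO[32]; fc is ignored for reads. */
	csio_mb_port(hw, mbp, 5000 /* ms */, portid, false, 0, fw_caps, NULL);

	if (csio_mb_issue(hw, mbp))
		return -EIO;

	csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps, pcaps, acaps);
	return (retval == FW_SUCCESS) ? 0 : -EINVAL;
}
#endif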
403
404/*
405 * csio_mb_initialize - FW INITIALIZE command helper
406 * @hw: The HW structure
407 * @mbp: Mailbox structure
 * @tmo: Command timeout
409 * @cbfn: Callback, if any.
410 *
411 */
412void
413csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
414 void (*cbfn) (struct csio_hw *, struct csio_mb *))
415{
416 struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);
417
418 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
419
420 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_INITIALIZE_CMD) |
421 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
422 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
423
424}
425
426/*
427 * csio_mb_iq_alloc - Initializes the mailbox to allocate an
428 * Ingress DMA queue in the firmware.
429 *
430 * @hw: The hw structure
431 * @mbp: Mailbox structure to initialize
432 * @priv: Private object
433 * @mb_tmo: Mailbox time-out period (in ms).
434 * @iq_params: Ingress queue params needed for allocation.
435 * @cbfn: The call-back function
436 *
437 *
438 */
439static void
440csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
441 uint32_t mb_tmo, struct csio_iq_params *iq_params,
442 void (*cbfn) (struct csio_hw *, struct csio_mb *))
443{
444 struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
445
446 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
447
448 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
449 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
450 FW_IQ_CMD_PFN_V(iq_params->pfn) |
451 FW_IQ_CMD_VFN_V(iq_params->vfn));
452
453 cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F |
454 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
455
456 cmdp->type_to_iqandstindex = htonl(
457 FW_IQ_CMD_VIID_V(iq_params->viid) |
458 FW_IQ_CMD_TYPE_V(iq_params->type) |
459 FW_IQ_CMD_IQASYNCH_V(iq_params->iqasynch));
460
	cmdp->fl0size = htons(iq_params->fl0size);
	cmdp->fl1size = htons(iq_params->fl1size);
463
464} /* csio_mb_iq_alloc */
465
466/*
467 * csio_mb_iq_write - Initializes the mailbox for writing into an
468 * Ingress DMA Queue.
469 *
470 * @hw: The HW structure
471 * @mbp: Mailbox structure to initialize
472 * @priv: Private object
473 * @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with an iq-alloc request.
 * @iq_params: Ingress queue params needed for writing.
 * @cbfn: The call-back function
 *
 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
 * because this IQ write request can be cascaded with a previous
 * IQ alloc request, and we don't want to over-write the bits set by
 * that request. This logic will work even in a non-cascaded case, since the
 * cmdp structure is zeroed out by CSIO_INIT_MBP.
483 */
484static void
485csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
486 uint32_t mb_tmo, bool cascaded_req,
487 struct csio_iq_params *iq_params,
488 void (*cbfn) (struct csio_hw *, struct csio_mb *))
489{
490 struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
491
492 uint32_t iq_start_stop = (iq_params->iq_start) ?
493 FW_IQ_CMD_IQSTART_F :
494 FW_IQ_CMD_IQSTOP_F;
495 int relaxed = !(hw->flags & CSIO_HWF_ROOT_NO_RELAXED_ORDERING);
496
497 /*
498 * If this IQ write is cascaded with IQ alloc request, do not
499 * re-initialize with 0's.
500 *
501 */
502 if (!cascaded_req)
503 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
504
505 cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_IQ_CMD) |
506 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
507 FW_IQ_CMD_PFN_V(iq_params->pfn) |
508 FW_IQ_CMD_VFN_V(iq_params->vfn));
509 cmdp->alloc_to_len16 |= htonl(iq_start_stop |
510 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
511 cmdp->iqid |= htons(iq_params->iqid);
512 cmdp->fl0id |= htons(iq_params->fl0id);
513 cmdp->fl1id |= htons(iq_params->fl1id);
514 cmdp->type_to_iqandstindex |= htonl(
515 FW_IQ_CMD_IQANDST_V(iq_params->iqandst) |
516 FW_IQ_CMD_IQANUS_V(iq_params->iqanus) |
517 FW_IQ_CMD_IQANUD_V(iq_params->iqanud) |
518 FW_IQ_CMD_IQANDSTINDEX_V(iq_params->iqandstindex));
519 cmdp->iqdroprss_to_iqesize |= htons(
520 FW_IQ_CMD_IQPCIECH_V(iq_params->iqpciech) |
521 FW_IQ_CMD_IQDCAEN_V(iq_params->iqdcaen) |
522 FW_IQ_CMD_IQDCACPU_V(iq_params->iqdcacpu) |
523 FW_IQ_CMD_IQINTCNTTHRESH_V(iq_params->iqintcntthresh) |
524 FW_IQ_CMD_IQCPRIO_V(iq_params->iqcprio) |
525 FW_IQ_CMD_IQESIZE_V(iq_params->iqesize));
526
527 cmdp->iqsize |= htons(iq_params->iqsize);
528 cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
529
530 if (iq_params->type == 0) {
531 cmdp->iqns_to_fl0congen |= htonl(
532 FW_IQ_CMD_IQFLINTIQHSEN_V(iq_params->iqflintiqhsen)|
533 FW_IQ_CMD_IQFLINTCONGEN_V(iq_params->iqflintcongen));
534 }
535
536 if (iq_params->fl0size && iq_params->fl0addr &&
537 (iq_params->fl0id != 0xFFFF)) {
538
539 cmdp->iqns_to_fl0congen |= htonl(
540 FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)|
541 FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) |
542 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
543 FW_IQ_CMD_FL0DATARO_V(relaxed) |
544 FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) |
545 FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen));
546 cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
547 FW_IQ_CMD_FL0DCAEN_V(iq_params->fl0dcaen) |
548 FW_IQ_CMD_FL0DCACPU_V(iq_params->fl0dcacpu) |
549 FW_IQ_CMD_FL0FBMIN_V(iq_params->fl0fbmin) |
550 FW_IQ_CMD_FL0FBMAX_V(iq_params->fl0fbmax) |
551 FW_IQ_CMD_FL0CIDXFTHRESH_V(iq_params->fl0cidxfthresh));
552 cmdp->fl0size |= htons(iq_params->fl0size);
553 cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
554 }
555} /* csio_mb_iq_write */
556
557/*
558 * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an
559 * Ingress DMA Queue.
560 *
561 * @hw: The HW structure
562 * @mbp: Mailbox structure to initialize
563 * @priv: Private data.
564 * @mb_tmo: Mailbox time-out period (in ms).
565 * @iq_params: Ingress queue params needed for allocation & writing.
566 * @cbfn: The call-back function
567 *
568 *
569 */
570void
571csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
572 uint32_t mb_tmo, struct csio_iq_params *iq_params,
573 void (*cbfn) (struct csio_hw *, struct csio_mb *))
574{
575 csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
	csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
577} /* csio_mb_iq_alloc_write */
578
579/*
580 * csio_mb_iq_alloc_write_rsp - Process the allocation & writing
581 * of ingress DMA queue mailbox's response.
582 *
583 * @hw: The HW structure.
584 * @mbp: Mailbox structure to initialize.
585 * @retval: Firmware return value.
586 * @iq_params: Ingress queue parameters, after allocation and write.
587 *
588 */
589void
590csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
591 enum fw_retval *ret_val,
592 struct csio_iq_params *iq_params)
593{
594 struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
595
596 *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
597 if (*ret_val == FW_SUCCESS) {
598 iq_params->physiqid = ntohs(rsp->physiqid);
599 iq_params->iqid = ntohs(rsp->iqid);
600 iq_params->fl0id = ntohs(rsp->fl0id);
601 iq_params->fl1id = ntohs(rsp->fl1id);
602 } else {
603 iq_params->physiqid = iq_params->iqid =
604 iq_params->fl0id = iq_params->fl1id = 0;
605 }
606} /* csio_mb_iq_alloc_write_rsp */
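
/*
 * Example (editorial sketch, not part of the driver): allocating and
 * bringing up an ingress queue in one cascaded mailbox, then picking up the
 * firmware-assigned IDs from the response. iq_params is assumed to be filled
 * in by the caller (pfn/vfn, sizes, DMA addresses, iq_start = 1, etc.).
 */
#if 0
static int example_setup_iq(struct csio_hw *hw, struct csio_mb *mbp,
			    struct csio_iq_params *iq_params)
{
	enum fw_retval retval;

	csio_mb_iq_alloc_write(hw, mbp, hw, 5000 /* ms */, iq_params, NULL);

	if (csio_mb_issue(hw, mbp))
		return -EIO;

	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, iq_params);
	if (retval != FW_SUCCESS)
		return -EINVAL;

	/* iq_params->iqid/physiqid/fl0id/fl1id now hold the allocated IDs. */
	return 0;
}
#endif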
607
608/*
609 * csio_mb_iq_free - Initializes the mailbox for freeing a
610 * specified Ingress DMA Queue.
611 *
612 * @hw: The HW structure
613 * @mbp: Mailbox structure to initialize
614 * @priv: Private data
615 * @mb_tmo: Mailbox time-out period (in ms).
616 * @iq_params: Parameters of ingress queue, that is to be freed.
617 * @cbfn: The call-back function
618 *
619 *
620 */
621void
622csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
623 uint32_t mb_tmo, struct csio_iq_params *iq_params,
624 void (*cbfn) (struct csio_hw *, struct csio_mb *))
625{
626 struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
627
628 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
629
630 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
631 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
632 FW_IQ_CMD_PFN_V(iq_params->pfn) |
633 FW_IQ_CMD_VFN_V(iq_params->vfn));
634 cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F |
635 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
636 cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iq_params->type));
637
638 cmdp->iqid = htons(iq_params->iqid);
639 cmdp->fl0id = htons(iq_params->fl0id);
640 cmdp->fl1id = htons(iq_params->fl1id);
641
642} /* csio_mb_iq_free */
643
644/*
645 * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
646 * an offload-egress queue.
647 *
648 * @hw: The HW structure
649 * @mbp: Mailbox structure to initialize
650 * @priv: Private data
651 * @mb_tmo: Mailbox time-out period (in ms).
652 * @eq_ofld_params: (Offload) Egress queue parameters.
653 * @cbfn: The call-back function
654 *
655 *
656 */
657static void
658csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
659 uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
660 void (*cbfn) (struct csio_hw *, struct csio_mb *))
661{
662 struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
663
664 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
665 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
666 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
667 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
668 FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
669 cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
670 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
671
672} /* csio_mb_eq_ofld_alloc */
673
674/*
675 * csio_mb_eq_ofld_write - Initializes the mailbox for writing
 * an allocated offload-egress queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with an EQ-alloc request.
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
 * because this EQ write request can be cascaded with a previous
 * EQ alloc request, and we don't want to over-write the bits set by
 * that request. This logic will work even in a non-cascaded case, since the
 * cmdp structure is zeroed out by CSIO_INIT_MBP.
692 */
693static void
694csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
695 uint32_t mb_tmo, bool cascaded_req,
696 struct csio_eq_params *eq_ofld_params,
697 void (*cbfn) (struct csio_hw *, struct csio_mb *))
698{
699 struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
700
701 uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
702 FW_EQ_OFLD_CMD_EQSTART_F :
703 FW_EQ_OFLD_CMD_EQSTOP_F;
704
705 /*
706 * If this EQ write is cascaded with EQ alloc request, do not
707 * re-initialize with 0's.
708 *
709 */
710 if (!cascaded_req)
711 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
712
713 cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
714 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
715 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
716 FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
717 cmdp->alloc_to_len16 |= htonl(eq_start_stop |
718 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
719
720 cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
721
722 cmdp->fetchszm_to_iqid |= htonl(
723 FW_EQ_OFLD_CMD_HOSTFCMODE_V(eq_ofld_params->hostfcmode) |
724 FW_EQ_OFLD_CMD_CPRIO_V(eq_ofld_params->cprio) |
725 FW_EQ_OFLD_CMD_PCIECHN_V(eq_ofld_params->pciechn) |
726 FW_EQ_OFLD_CMD_IQID_V(eq_ofld_params->iqid));
727
728 cmdp->dcaen_to_eqsize |= htonl(
729 FW_EQ_OFLD_CMD_DCAEN_V(eq_ofld_params->dcaen) |
730 FW_EQ_OFLD_CMD_DCACPU_V(eq_ofld_params->dcacpu) |
731 FW_EQ_OFLD_CMD_FBMIN_V(eq_ofld_params->fbmin) |
732 FW_EQ_OFLD_CMD_FBMAX_V(eq_ofld_params->fbmax) |
733 FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(eq_ofld_params->cidxfthresho) |
734 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(eq_ofld_params->cidxfthresh) |
735 FW_EQ_OFLD_CMD_EQSIZE_V(eq_ofld_params->eqsize));
736
737 cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
738
739} /* csio_mb_eq_ofld_write */
740
741/*
 * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
 * writing into an Egress DMA Queue.
744 *
745 * @hw: The HW structure
746 * @mbp: Mailbox structure to initialize
747 * @priv: Private data.
748 * @mb_tmo: Mailbox time-out period (in ms).
749 * @eq_ofld_params: (Offload) Egress queue parameters.
750 * @cbfn: The call-back function
751 *
752 *
753 */
754void
755csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
756 void *priv, uint32_t mb_tmo,
757 struct csio_eq_params *eq_ofld_params,
758 void (*cbfn) (struct csio_hw *, struct csio_mb *))
759{
760 csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
	csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
762 eq_ofld_params, cbfn);
763} /* csio_mb_eq_ofld_alloc_write */
764
765/*
766 * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation
767 * & write egress DMA queue mailbox's response.
768 *
769 * @hw: The HW structure.
770 * @mbp: Mailbox structure to initialize.
771 * @retval: Firmware return value.
772 * @eq_ofld_params: (Offload) Egress queue parameters.
773 *
774 */
775void
776csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
777 struct csio_mb *mbp, enum fw_retval *ret_val,
778 struct csio_eq_params *eq_ofld_params)
779{
780 struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
781
782 *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
783
784 if (*ret_val == FW_SUCCESS) {
785 eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_G(
786 ntohl(rsp->eqid_pkd));
787 eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_G(
788 ntohl(rsp->physeqid_pkd));
789 } else
790 eq_ofld_params->eqid = 0;
791
792} /* csio_mb_eq_ofld_alloc_write_rsp */
793
794/*
795 * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
 * specified Egress DMA Queue.
797 *
798 * @hw: The HW structure
799 * @mbp: Mailbox structure to initialize
800 * @priv: Private data area.
801 * @mb_tmo: Mailbox time-out period (in ms).
802 * @eq_ofld_params: (Offload) Egress queue parameters, that is to be freed.
803 * @cbfn: The call-back function
804 *
805 *
806 */
807void
808csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
809 uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
810 void (*cbfn) (struct csio_hw *, struct csio_mb *))
811{
812 struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
813
814 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
815
816 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
817 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
818 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
819 FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
820 cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F |
821 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
822 cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
823
824} /* csio_mb_eq_ofld_free */
825
/*
 * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
 * condition.
 *
 * @ln: The Lnode structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @port_id: Port ID.
 * @sub_opcode: FCoE link command sub-opcode.
 * @cos: COS value (unused by this helper).
 * @link_status: Link status.
 * @fcfi: FCF index.
 * @cbfn: The call back function.
 *
 */
837void
838csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
839 uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
840 uint8_t cos, bool link_status, uint32_t fcfi,
841 void (*cbfn) (struct csio_hw *, struct csio_mb *))
842{
843 struct fw_fcoe_link_cmd *cmdp =
844 (struct fw_fcoe_link_cmd *)(mbp->mb);
845
846 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
847
848 cmdp->op_to_portid = htonl((
849 FW_CMD_OP_V(FW_FCOE_LINK_CMD) |
850 FW_CMD_REQUEST_F |
851 FW_CMD_WRITE_F |
852 FW_FCOE_LINK_CMD_PORTID(port_id)));
853 cmdp->sub_opcode_fcfi = htonl(
854 FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
855 FW_FCOE_LINK_CMD_FCFI(fcfi));
856 cmdp->lstatus = link_status;
857 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
858
859} /* csio_write_fcoe_link_cond_init_mb */
860
861/*
862 * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
 * resource information (FW_FCOE_RES_INFO_CMD).
864 *
865 * @hw: The HW structure
866 * @mbp: Mailbox structure to initialize
867 * @mb_tmo: Mailbox time-out period (in ms).
868 * @cbfn: The call-back function
869 *
870 *
871 */
872void
873csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
874 uint32_t mb_tmo,
875 void (*cbfn) (struct csio_hw *, struct csio_mb *))
876{
877 struct fw_fcoe_res_info_cmd *cmdp =
878 (struct fw_fcoe_res_info_cmd *)(mbp->mb);
879
880 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
881
882 cmdp->op_to_read = htonl((FW_CMD_OP_V(FW_FCOE_RES_INFO_CMD) |
883 FW_CMD_REQUEST_F |
884 FW_CMD_READ_F));
885
886 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
887
888} /* csio_fcoe_read_res_info_init_mb */
889
890/*
891 * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
892 * in the firmware (FW_FCOE_VNP_CMD).
893 *
894 * @ln: The Lnode structure.
895 * @mbp: Mailbox structure to initialize.
896 * @mb_tmo: Mailbox time-out period (in ms).
897 * @fcfi: FCF Index.
898 * @vnpi: vnpi
899 * @iqid: iqid
900 * @vnport_wwnn: vnport WWNN
901 * @vnport_wwpn: vnport WWPN
902 * @cbfn: The call-back function.
903 *
904 *
905 */
906void
907csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
908 uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
909 uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
910 void (*cbfn) (struct csio_hw *, struct csio_mb *))
911{
912 struct fw_fcoe_vnp_cmd *cmdp =
913 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
914
915 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
916
917 cmdp->op_to_fcfi = htonl((FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
918 FW_CMD_REQUEST_F |
919 FW_CMD_EXEC_F |
920 FW_FCOE_VNP_CMD_FCFI(fcfi)));
921
922 cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
923 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
924
925 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
926
927 cmdp->iqid = htons(iqid);
928
	if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
930 cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);
931
932 if (vnport_wwnn)
933 memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
934 if (vnport_wwpn)
935 memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);
936
937} /* csio_fcoe_vnp_alloc_init_mb */
938
939/*
940 * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
941 * @ln: The Lnode structure.
942 * @mbp: Mailbox structure to initialize.
943 * @mb_tmo: Mailbox time-out period (in ms).
944 * @fcfi: FCF Index.
945 * @vnpi: vnpi
946 * @cbfn: The call-back handler.
947 */
948void
949csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
950 uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
951 void (*cbfn) (struct csio_hw *, struct csio_mb *))
952{
953 struct fw_fcoe_vnp_cmd *cmdp =
954 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
955
956 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
957 cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
958 FW_CMD_REQUEST_F |
959 FW_CMD_READ_F |
960 FW_FCOE_VNP_CMD_FCFI(fcfi));
961 cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
962 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
963}
964
965/*
966 * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
 * allocated VNP in the firmware (FW_FCOE_VNP_CMD).
968 *
969 * @ln: The Lnode structure.
970 * @mbp: Mailbox structure to initialize.
971 * @mb_tmo: Mailbox time-out period (in ms).
972 * @fcfi: FCF flow id
973 * @vnpi: VNP flow id
974 * @cbfn: The call-back function.
975 * Return: None
976 */
977void
978csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
979 uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
980 void (*cbfn) (struct csio_hw *, struct csio_mb *))
981{
982 struct fw_fcoe_vnp_cmd *cmdp =
983 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
984
985 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
986
987 cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
988 FW_CMD_REQUEST_F |
989 FW_CMD_EXEC_F |
990 FW_FCOE_VNP_CMD_FCFI(fcfi));
991 cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
992 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
993 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
994}
995
996/*
997 * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
998 * FCF records.
999 *
1000 * @ln: The Lnode structure
1001 * @mbp: Mailbox structure to initialize
1002 * @mb_tmo: Mailbox time-out period (in ms).
 * @portid: Port ID.
 * @fcfi: FCF index to read.
1004 * @cbfn: The call-back function
1005 *
1006 *
1007 */
1008void
1009csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
1010 uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
1011 void (*cbfn) (struct csio_hw *, struct csio_mb *))
1012{
1013 struct fw_fcoe_fcf_cmd *cmdp =
1014 (struct fw_fcoe_fcf_cmd *)(mbp->mb);
1015
1016 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
1017
1018 cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_FCF_CMD) |
1019 FW_CMD_REQUEST_F |
1020 FW_CMD_READ_F |
1021 FW_FCOE_FCF_CMD_FCFI(fcfi));
1022 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
1023
1024} /* csio_fcoe_read_fcf_init_mb */
1025
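/*
 * csio_fcoe_read_portparams_init_mb - Initializes the mailbox to read FCoE
 * port statistics (FW_FCOE_STATS_CMD).
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @portparams: Port parameters (port id, stats index and count) to read.
 * @cbfn: The call-back function
 *
 */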
1026void
1027csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
1028 uint32_t mb_tmo,
1029 struct fw_fcoe_port_cmd_params *portparams,
1030 void (*cbfn)(struct csio_hw *,
1031 struct csio_mb *))
1032{
1033 struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
1034
1035 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
1036 mbp->mb_size = 64;
1037
1038 cmdp->op_to_flowid = htonl(FW_CMD_OP_V(FW_FCOE_STATS_CMD) |
1039 FW_CMD_REQUEST_F | FW_CMD_READ_F);
1040 cmdp->free_to_len16 = htonl(FW_CMD_LEN16_V(CSIO_MAX_MB_SIZE/16));
1041
1042 cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
1043 FW_FCOE_STATS_CMD_PORT(portparams->portid);
1044
1045 cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) |
1046 FW_FCOE_STATS_CMD_PORT_VALID;
1047
1048} /* csio_fcoe_read_portparams_init_mb */
1049
1050void
1051csio_mb_process_portparams_rsp(struct csio_hw *hw,
1052 struct csio_mb *mbp,
1053 enum fw_retval *retval,
1054 struct fw_fcoe_port_cmd_params *portparams,
1055 struct fw_fcoe_port_stats *portstats)
1056{
1057 struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
1058 struct fw_fcoe_port_stats stats;
1059 uint8_t *src;
1060 uint8_t *dst;
1061
1062 *retval = FW_CMD_RETVAL_G(ntohl(rsp->free_to_len16));
1063
1064 memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));
1065
1066 if (*retval == FW_SUCCESS) {
1067 dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
1068 src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
1069 memcpy(dst, src, (portparams->nstats * 8));
1070 if (portparams->idx == 1) {
1071 /* Get the first 6 flits from the Mailbox */
1072 portstats->tx_bcast_bytes = stats.tx_bcast_bytes;
1073 portstats->tx_bcast_frames = stats.tx_bcast_frames;
1074 portstats->tx_mcast_bytes = stats.tx_mcast_bytes;
1075 portstats->tx_mcast_frames = stats.tx_mcast_frames;
1076 portstats->tx_ucast_bytes = stats.tx_ucast_bytes;
1077 portstats->tx_ucast_frames = stats.tx_ucast_frames;
1078 }
1079 if (portparams->idx == 7) {
1080 /* Get the second 6 flits from the Mailbox */
1081 portstats->tx_drop_frames = stats.tx_drop_frames;
1082 portstats->tx_offload_bytes = stats.tx_offload_bytes;
1083 portstats->tx_offload_frames = stats.tx_offload_frames;
1084#if 0
1085 portstats->rx_pf_bytes = stats.rx_pf_bytes;
1086 portstats->rx_pf_frames = stats.rx_pf_frames;
1087#endif
1088 portstats->rx_bcast_bytes = stats.rx_bcast_bytes;
1089 portstats->rx_bcast_frames = stats.rx_bcast_frames;
1090 portstats->rx_mcast_bytes = stats.rx_mcast_bytes;
1091 }
1092 if (portparams->idx == 13) {
1093 /* Get the last 4 flits from the Mailbox */
1094 portstats->rx_mcast_frames = stats.rx_mcast_frames;
1095 portstats->rx_ucast_bytes = stats.rx_ucast_bytes;
1096 portstats->rx_ucast_frames = stats.rx_ucast_frames;
1097 portstats->rx_err_frames = stats.rx_err_frames;
1098 }
1099 }
1100}
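
/*
 * Example (editorial sketch, not part of the driver): the response handler
 * above only copies the flits selected by portparams->idx, so a full
 * fw_fcoe_port_stats readout takes three mailbox round trips - indices 1, 7
 * and 13 with 6, 6 and 4 statistics respectively. The loop below is an
 * assumption about how a caller would drive that, not driver code.
 */
#if 0
static int example_read_port_stats(struct csio_hw *hw, struct csio_mb *mbp,
				   uint8_t portid,
				   struct fw_fcoe_port_stats *stats)
{
	struct fw_fcoe_port_cmd_params portparams;
	enum fw_retval retval;
	int idx;

	portparams.portid = portid;

	for (idx = 1; idx <= 13; idx += 6) {
		portparams.idx = idx;
		portparams.nstats = (idx == 13) ? 4 : 6;

		csio_fcoe_read_portparams_init_mb(hw, mbp, 5000 /* ms */,
						  &portparams, NULL);
		if (csio_mb_issue(hw, mbp))
			return -EIO;

		csio_mb_process_portparams_rsp(hw, mbp, &retval, &portparams,
					       stats);
		if (retval != FW_SUCCESS)
			return -EINVAL;
	}
	return 0;
}
#endif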
1101
1102/* Entry points/APIs for MB module */
1103/*
1104 * csio_mb_intr_enable - Enable Interrupts from mailboxes.
1105 * @hw: The HW structure
1106 *
1107 * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
1108 */
1109void
1110csio_mb_intr_enable(struct csio_hw *hw)
1111{
1112 csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
1113 csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
1114}
1115
1116/*
1117 * csio_mb_intr_disable - Disable Interrupts from mailboxes.
1118 * @hw: The HW structure
1119 *
1120 * Disable bit in HostInterruptEnable CIM register.
1121 */
1122void
1123csio_mb_intr_disable(struct csio_hw *hw)
1124{
1125 csio_wr_reg32(hw, MBMSGRDYINTEN_V(0),
1126 MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
1127 csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
1128}
1129
1130static void
1131csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
1132{
1133 struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
1134
1135 if ((FW_DEBUG_CMD_TYPE_G(ntohl(dbg->op_type))) == 1) {
1136 csio_info(hw, "FW print message:\n");
1137 csio_info(hw, "\tdebug->dprtstridx = %d\n",
1138 ntohs(dbg->u.prt.dprtstridx));
1139 csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
1140 ntohl(dbg->u.prt.dprtstrparam0));
1141 csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
1142 ntohl(dbg->u.prt.dprtstrparam1));
1143 csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
1144 ntohl(dbg->u.prt.dprtstrparam2));
1145 csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
1146 ntohl(dbg->u.prt.dprtstrparam3));
1147 } else {
1148 /* This is a FW assertion */
1149 csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
1150 dbg->u.assert.filename_0_7,
1151 ntohl(dbg->u.assert.line),
1152 ntohl(dbg->u.assert.x),
1153 ntohl(dbg->u.assert.y));
1154 }
1155}
1156
1157static void
1158csio_mb_debug_cmd_handler(struct csio_hw *hw)
1159{
1160 int i;
1161 __be64 cmd[CSIO_MB_MAX_REGS];
1162 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
1163 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
1164 int size = sizeof(struct fw_debug_cmd);
1165
1166 /* Copy mailbox data */
1167 for (i = 0; i < size; i += 8)
1168 cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));
1169
1170 csio_mb_dump_fw_dbg(hw, cmd);
1171
1172 /* Notify FW of mailbox by setting owner as UP */
1173 csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
1174 MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
1175
1176 csio_rd_reg32(hw, ctl_reg);
1177 wmb();
1178}
1179
1180/*
1181 * csio_mb_issue - generic routine for issuing Mailbox commands.
1182 * @hw: The HW structure
1183 * @mbp: Mailbox command to issue
1184 *
1185 * Caller should hold hw lock across this call.
1186 */
1187int
1188csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
1189{
1190 uint32_t owner, ctl;
1191 int i;
1192 uint32_t ii;
1193 __be64 *cmd = mbp->mb;
1194 __be64 hdr;
1195 struct csio_mbm *mbm = &hw->mbm;
1196 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
1197 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
1198 int size = mbp->mb_size;
1199 int rv = -EINVAL;
1200 struct fw_cmd_hdr *fw_hdr;
1201
1202 /* Determine mode */
1203 if (mbp->mb_cbfn == NULL) {
1204 /* Need to issue/get results in the same context */
1205 if (mbp->tmo < CSIO_MB_POLL_FREQ) {
1206 csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
1207 goto error_out;
1208 }
1209 } else if (!csio_is_host_intr_enabled(hw) ||
1210 !csio_is_hw_intr_enabled(hw)) {
1211 csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
1212 *((uint8_t *)mbp->mb));
1213 goto error_out;
1214 }
1215
1216 if (mbm->mcurrent != NULL) {
1217 /* Queue mbox cmd, if another mbox cmd is active */
1218 if (mbp->mb_cbfn == NULL) {
1219 rv = -EBUSY;
1220 csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
1221 hw->pfn, *((uint8_t *)mbp->mb));
1222
1223 goto error_out;
1224 } else {
			list_add_tail(&mbp->list, &mbm->req_q);
1226 CSIO_INC_STATS(mbm, n_activeq);
1227
1228 return 0;
1229 }
1230 }
1231
1232 /* Now get ownership of mailbox */
1233 owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
1234
1235 if (!csio_mb_is_host_owner(owner)) {
1236
1237 for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
1238 owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
1239 /*
1240 * Mailbox unavailable. In immediate mode, fail the command.
1241 * In other modes, enqueue the request.
1242 */
1243 if (!csio_mb_is_host_owner(owner)) {
1244 if (mbp->mb_cbfn == NULL) {
1245 rv = owner ? -EBUSY : -ETIMEDOUT;
1246
1247 csio_dbg(hw,
1248 "Couldn't own Mailbox %x op:0x%x "
1249 "owner:%x\n",
1250 hw->pfn, *((uint8_t *)mbp->mb), owner);
1251 goto error_out;
1252 } else {
1253 if (mbm->mcurrent == NULL) {
1254 csio_err(hw,
1255 "Couldn't own Mailbox %x "
1256 "op:0x%x owner:%x\n",
1257 hw->pfn, *((uint8_t *)mbp->mb),
1258 owner);
1259 csio_err(hw,
1260 "No outstanding driver"
1261 " mailbox as well\n");
1262 goto error_out;
1263 }
1264 }
1265 }
1266 }
1267
1268 /* Mailbox is available, copy mailbox data into it */
1269 for (i = 0; i < size; i += 8) {
1270 csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
1271 cmd++;
1272 }
1273
1274 CSIO_DUMP_MB(hw, hw->pfn, data_reg);
1275
1276 /* Start completion timers in non-immediate modes and notify FW */
1277 if (mbp->mb_cbfn != NULL) {
1278 mbm->mcurrent = mbp;
		mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
1280 csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
1281 MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
1282 } else
1283 csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW),
1284 ctl_reg);
1285
1286 /* Flush posted writes */
1287 csio_rd_reg32(hw, ctl_reg);
1288 wmb();
1289
1290 CSIO_INC_STATS(mbm, n_req);
1291
1292 if (mbp->mb_cbfn)
1293 return 0;
1294
1295 /* Poll for completion in immediate mode */
1296 cmd = mbp->mb;
1297
1298 for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
1299 mdelay(CSIO_MB_POLL_FREQ);
1300
1301 /* Check for response */
1302 ctl = csio_rd_reg32(hw, ctl_reg);
1303 if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
1304
1305 if (!(ctl & MBMSGVALID_F)) {
1306 csio_wr_reg32(hw, 0, ctl_reg);
1307 continue;
1308 }
1309
1310 CSIO_DUMP_MB(hw, hw->pfn, data_reg);
1311
1312 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
1313 fw_hdr = (struct fw_cmd_hdr *)&hdr;
1314
1315 switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
1316 case FW_DEBUG_CMD:
1317 csio_mb_debug_cmd_handler(hw);
1318 continue;
1319 }
1320
1321 /* Copy response */
1322 for (i = 0; i < size; i += 8)
1323 *cmd++ = cpu_to_be64(csio_rd_reg64
1324 (hw, data_reg + i));
1325 csio_wr_reg32(hw, 0, ctl_reg);
1326
1327 if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
1328 CSIO_INC_STATS(mbm, n_err);
1329
1330 CSIO_INC_STATS(mbm, n_rsp);
1331 return 0;
1332 }
1333 }
1334
1335 CSIO_INC_STATS(mbm, n_tmo);
1336
1337 csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
1338 hw->pfn, *((uint8_t *)cmd));
1339
1340 return -ETIMEDOUT;
1341
1342error_out:
1343 CSIO_INC_STATS(mbm, n_err);
1344 return rv;
1345}
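
/*
 * Example (editorial sketch, not part of the driver): issuing a mailbox in
 * interrupt (callback) mode. The completion runs later out of
 * csio_mb_completions(); freeing the mailbox back to the driver's mempool
 * (hw->mb_mempool) in the callback is an assumption about the caller's
 * allocation scheme.
 */
#if 0
static void example_mb_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
		csio_err(hw, "mailbox op:0x%x failed\n",
			 *((uint8_t *)mbp->mb));

	mempool_free(mbp, hw->mb_mempool);
}

static int example_issue_async(struct csio_hw *hw, struct csio_mb *mbp)
{
	/*
	 * mbp was initialized by a csio_mb_*() helper with example_mb_cbfn
	 * as cbfn; csio_mb_issue() returns as soon as the command is queued
	 * or written to the mailbox registers.
	 */
	return csio_mb_issue(hw, mbp);
}
#endif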
1346
1347/*
1348 * csio_mb_completions - Completion handler for Mailbox commands
1349 * @hw: The HW structure
1350 * @cbfn_q: Completion queue.
1351 *
1352 */
1353void
1354csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
1355{
1356 struct csio_mb *mbp;
1357 struct csio_mbm *mbm = &hw->mbm;
1358 enum fw_retval rv;
1359
	while (!list_empty(cbfn_q)) {
		mbp = list_first_entry(cbfn_q, struct csio_mb, list);
		list_del_init(&mbp->list);
1363
1364 rv = csio_mb_fw_retval(mbp);
1365 if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
1366 CSIO_INC_STATS(mbm, n_err);
1367 else if (rv != FW_HOSTERROR)
1368 CSIO_INC_STATS(mbm, n_rsp);
1369
1370 if (mbp->mb_cbfn)
1371 mbp->mb_cbfn(hw, mbp);
1372 }
1373}
1374
1375static void
1376csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
1377{
1378 static char *mod_str[] = {
1379 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
1380 };
1381
1382 struct csio_pport *port = &hw->pport[port_id];
1383
1384 if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
1385 csio_info(hw, "Port:%d - port module unplugged\n", port_id);
1386 else if (port->mod_type < ARRAY_SIZE(mod_str))
1387 csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
1388 mod_str[port->mod_type]);
1389 else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
1390 csio_info(hw,
1391 "Port:%d - unsupported optical port module "
1392 "inserted\n", port_id);
1393 else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
1394 csio_info(hw,
1395 "Port:%d - unknown port module inserted, forcing "
1396 "TWINAX\n", port_id);
1397 else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
1398 csio_info(hw, "Port:%d - transceiver module error\n", port_id);
1399 else
1400 csio_info(hw, "Port:%d - unknown module type %d inserted\n",
1401 port_id, port->mod_type);
1402}
1403
1404int
1405csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
1406{
1407 uint8_t opcode = *(uint8_t *)cmd;
1408 struct fw_port_cmd *pcmd;
1409 uint8_t port_id;
1410 uint32_t link_status;
1411 uint16_t action;
1412 uint8_t mod_type;
1413 fw_port_cap32_t linkattr;
1414
1415 if (opcode == FW_PORT_CMD) {
1416 pcmd = (struct fw_port_cmd *)cmd;
1417 port_id = FW_PORT_CMD_PORTID_G(
1418 ntohl(pcmd->op_to_portid));
1419 action = FW_PORT_CMD_ACTION_G(
1420 ntohl(pcmd->action_to_len16));
1421 if (action != FW_PORT_ACTION_GET_PORT_INFO &&
1422 action != FW_PORT_ACTION_GET_PORT_INFO32) {
1423 csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
1424 action);
1425 return -EINVAL;
1426 }
1427
1428 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
1429 link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
1430 mod_type = FW_PORT_CMD_MODTYPE_G(link_status);
			linkattr = lstatus_to_fwcap(link_status);
1432
1433 hw->pport[port_id].link_status =
1434 FW_PORT_CMD_LSTATUS_G(link_status);
1435 } else {
1436 link_status =
1437 ntohl(pcmd->u.info32.lstatus32_to_cbllen32);
1438 mod_type = FW_PORT_CMD_MODTYPE32_G(link_status);
1439 linkattr = ntohl(pcmd->u.info32.linkattr32);
1440
1441 hw->pport[port_id].link_status =
1442 FW_PORT_CMD_LSTATUS32_G(link_status);
1443 }
1444
		hw->pport[port_id].link_speed = fwcap_to_fwspeed(linkattr);
1446
1447 csio_info(hw, "Port:%x - LINK %s\n", port_id,
1448 hw->pport[port_id].link_status ? "UP" : "DOWN");
1449
1450 if (mod_type != hw->pport[port_id].mod_type) {
1451 hw->pport[port_id].mod_type = mod_type;
1452 csio_mb_portmod_changed(hw, port_id);
1453 }
1454 } else if (opcode == FW_DEBUG_CMD) {
1455 csio_mb_dump_fw_dbg(hw, cmd);
1456 } else {
1457 csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
1458 return -EINVAL;
1459 }
1460
1461 return 0;
1462}
1463
1464/*
1465 * csio_mb_isr_handler - Handle mailboxes related interrupts.
1466 * @hw: The HW structure
1467 *
1468 * Called from the ISR to handle Mailbox related interrupts.
1469 * HW Lock should be held across this call.
1470 */
1471int
1472csio_mb_isr_handler(struct csio_hw *hw)
1473{
1474 struct csio_mbm *mbm = &hw->mbm;
1475 struct csio_mb *mbp = mbm->mcurrent;
1476 __be64 *cmd;
1477 uint32_t ctl, cim_cause, pl_cause;
1478 int i;
1479 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
1480 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
1481 int size;
1482 __be64 hdr;
1483 struct fw_cmd_hdr *fw_hdr;
1484
1485 pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A));
1486 cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
1487
1488 if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) {
1489 CSIO_INC_STATS(hw, n_mbint_unexp);
1490 return -EINVAL;
1491 }
1492
1493 /*
1494 * The cause registers below HAVE to be cleared in the SAME
1495 * order as below: The low level cause register followed by
1496 * the upper level cause register. In other words, CIM-cause
1497 * first followed by PL-Cause next.
1498 */
1499 csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
1500 csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A));
1501
1502 ctl = csio_rd_reg32(hw, ctl_reg);
1503
1504 if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
1505
1506 CSIO_DUMP_MB(hw, hw->pfn, data_reg);
1507
1508 if (!(ctl & MBMSGVALID_F)) {
1509 csio_warn(hw,
1510 "Stray mailbox interrupt recvd,"
1511 " mailbox data not valid\n");
1512 csio_wr_reg32(hw, 0, ctl_reg);
1513 /* Flush */
1514 csio_rd_reg32(hw, ctl_reg);
1515 return -EINVAL;
1516 }
1517
1518 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
1519 fw_hdr = (struct fw_cmd_hdr *)&hdr;
1520
1521 switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
1522 case FW_DEBUG_CMD:
1523 csio_mb_debug_cmd_handler(hw);
1524 return -EINVAL;
1525#if 0
1526 case FW_ERROR_CMD:
1527 case FW_INITIALIZE_CMD: /* When we are not master */
1528#endif
1529 }
1530
1531 CSIO_ASSERT(mbp != NULL);
1532
1533 cmd = mbp->mb;
1534 size = mbp->mb_size;
1535 /* Get response */
1536 for (i = 0; i < size; i += 8)
1537 *cmd++ = cpu_to_be64(csio_rd_reg64
1538 (hw, data_reg + i));
1539
1540 csio_wr_reg32(hw, 0, ctl_reg);
1541 /* Flush */
1542 csio_rd_reg32(hw, ctl_reg);
1543
1544 mbm->mcurrent = NULL;
1545
1546 /* Add completion to tail of cbfn queue */
		list_add_tail(&mbp->list, &mbm->cbfn_q);
1548 CSIO_INC_STATS(mbm, n_cbfnq);
1549
1550 /*
1551 * Enqueue event to EventQ. Events processing happens
1552 * in Event worker thread context
1553 */
1554 if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
1555 CSIO_INC_STATS(hw, n_evt_drop);
1556
1557 return 0;
1558
1559 } else {
1560 /*
1561 * We can get here if mailbox MSIX vector is shared,
1562 * or in INTx case. Or a stray interrupt.
1563 */
1564 csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
1565 CSIO_INC_STATS(hw, n_int_stray);
1566 return -EINVAL;
1567 }
1568}
1569
1570/*
1571 * csio_mb_tmo_handler - Timeout handler
1572 * @hw: The HW structure
1573 *
1574 */
1575struct csio_mb *
1576csio_mb_tmo_handler(struct csio_hw *hw)
1577{
1578 struct csio_mbm *mbm = &hw->mbm;
1579 struct csio_mb *mbp = mbm->mcurrent;
1580 struct fw_cmd_hdr *fw_hdr;
1581
1582 /*
1583 * Could be a race b/w the completion handler and the timer
1584 * and the completion handler won that race.
1585 */
1586 if (mbp == NULL) {
1587 CSIO_DB_ASSERT(0);
1588 return NULL;
1589 }
1590
1591 fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
1592
1593 csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
1594 FW_CMD_OP_G(ntohl(fw_hdr->hi)));
1595
1596 mbm->mcurrent = NULL;
1597 CSIO_INC_STATS(mbm, n_tmo);
1598 fw_hdr->lo = htonl(FW_CMD_RETVAL_V(FW_ETIMEDOUT));
1599
1600 return mbp;
1601}
1602
1603/*
1604 * csio_mb_cancel_all - Cancel all waiting commands.
1605 * @hw: The HW structure
1606 * @cbfn_q: The callback queue.
1607 *
1608 * Caller should hold hw lock across this call.
1609 */
1610void
1611csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
1612{
1613 struct csio_mb *mbp;
1614 struct csio_mbm *mbm = &hw->mbm;
1615 struct fw_cmd_hdr *hdr;
1616 struct list_head *tmp;
1617
1618 if (mbm->mcurrent) {
1619 mbp = mbm->mcurrent;
1620
1621 /* Stop mailbox completion timer */
		del_timer_sync(&mbm->timer);

		/* Add completion to tail of cbfn queue */
		list_add_tail(&mbp->list, cbfn_q);
1626 mbm->mcurrent = NULL;
1627 }
1628
	if (!list_empty(&mbm->req_q)) {
		list_splice_tail_init(&mbm->req_q, cbfn_q);
		mbm->stats.n_activeq = 0;
	}

	if (!list_empty(&mbm->cbfn_q)) {
		list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
		mbm->stats.n_cbfnq = 0;
	}

	if (list_empty(cbfn_q))
1640 return;
1641
1642 list_for_each(tmp, cbfn_q) {
1643 mbp = (struct csio_mb *)tmp;
1644 hdr = (struct fw_cmd_hdr *)(mbp->mb);
1645
1646 csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
1647 hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi)));
1648
1649 CSIO_INC_STATS(mbm, n_cancel);
1650 hdr->lo = htonl(FW_CMD_RETVAL_V(FW_HOSTERROR));
1651 }
1652}
1653
1654/*
1655 * csio_mbm_init - Initialize Mailbox module
1656 * @mbm: Mailbox module
1657 * @hw: The HW structure
1658 * @timer: Timing function for interrupting mailboxes
1659 *
1660 * Initialize timer and the request/response queues.
1661 */
1662int
1663csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
1664 void (*timer_fn)(struct timer_list *))
1665{
1666 mbm->hw = hw;
1667 timer_setup(&mbm->timer, timer_fn, 0);
1668
	INIT_LIST_HEAD(&mbm->req_q);
	INIT_LIST_HEAD(&mbm->cbfn_q);
1671 csio_set_mb_intr_idx(mbm, -1);
1672
1673 return 0;
1674}
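
/*
 * Example (editorial sketch, not part of the driver): the timer_fn passed to
 * csio_mbm_init() is expected to reclaim the timed-out mailbox via
 * csio_mb_tmo_handler() and complete it. The from_timer()/mb_cbfn usage
 * below mirrors what a caller would typically do; it is illustrative only
 * and ignores locking concerns.
 */
#if 0
static void example_mb_timer_fn(struct timer_list *t)
{
	struct csio_hw *hw = from_timer(hw, t, mbm.timer);
	struct csio_mb *mbp;

	mbp = csio_mb_tmo_handler(hw);
	if (mbp && mbp->mb_cbfn)
		mbp->mb_cbfn(hw, mbp);	/* completes with FW_ETIMEDOUT */
}

/* Registered at init time: */
/*	csio_mbm_init(&hw->mbm, hw, example_mb_timer_fn); */
#endif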
1675
1676/*
1677 * csio_mbm_exit - Uninitialize mailbox module
1678 * @mbm: Mailbox module
1679 *
1680 * Stop timer.
1681 */
1682void
1683csio_mbm_exit(struct csio_mbm *mbm)
1684{
	del_timer_sync(&mbm->timer);
1686
1687 CSIO_DB_ASSERT(mbm->mcurrent == NULL);
1688 CSIO_DB_ASSERT(list_empty(&mbm->req_q));
1689 CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
1690}
1691
