/* bnx2fc_hwi.c: QLogic Linux FCoE offload driver.
 * This file contains the low-level functions that interact with the
 * 57712 FCoE firmware.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					 struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
				     struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					    struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *destroy_kcqe);

int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
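
/*
 * The STAT request completes asynchronously: the firmware posts a
 * FCOE_KCQE_OPCODE_STAT_FUNC KCQE and bnx2fc_indicate_kcqe() below then
 * completes hba->stat_req_done. An illustrative caller sequence (the real
 * caller lives elsewhere in the driver) would be:
 *
 *	init_completion(&hba->stat_req_done);
 *	if (bnx2fc_send_stat_req(hba))
 *		return;
 *	wait_for_completion_timeout(&hba->stat_req_done, 2 * HZ);
 */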

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba: adapter structure pointer
 *
 * Send down the FCoE firmware init KWQEs that initiate the initial handshake
 * with the f/w.
 *
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = hba->max_tasks;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
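	/*
	 * Mini-jumbo MTU: large enough to carry a maximum-size FC frame
	 * (2112-byte payload plus headers) in a single Ethernet frame.
	 */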
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;

	/*
	 * enable both cached connection and cached tasks
	 * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
	 */
	fcoe_init3.perf_config = 3;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
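
/**
 * bnx2fc_send_fw_fcoe_destroy_msg - tears down the FCoE function in firmware
 *
 * @hba: adapter structure pointer
 *
 * Sends the DESTROY KWQE, the counterpart to bnx2fc_send_fw_fcoe_init_msg().
 */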
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = -1;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
				 struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
				(u32)((u64) tgt->rq_dma >> 32);

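	/*
	 * Seed the RQ producer with bit 15 set: that bit is the wrap
	 * counter bnx2fc_return_rqe() maintains as the 15-bit index wraps.
	 */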
	ofld_req1.rq_prod = 0x8000;

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy during linkdown processing as
	 * when the lport is reset, the port_id also is reset to 0
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/
	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/*
	 * Info from PRLI response, this info is used for sequence level error
	 * recovery support
	 */
	if (tgt->dev_type == TYPE_TAPE) {
		ofld_req3.flags |= 1 <<
				   FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
		ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
				    ? 1 : 0) <<
				   FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
	}

	/* vlan flag */
	ofld_req3.flags |= (interface->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

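	/*
	 * lport->e_d_tov is in milliseconds; the firmware timer evidently
	 * counts in 20ms ticks, hence the divide by 20.
	 */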
	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

	ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
							/* fcf mac */
	ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
				(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_enable_req(struct fcoe_port *port,
				   struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
	enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

	enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = interface->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
	disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
	disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
	disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
	disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
	disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];

	disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = interface->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba: adapter structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
				    struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
	struct bnx2fc_lport *blport;

	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry(blport, &hba->vports, list) {
		if (blport->lport == lport) {
			spin_unlock_bh(&hba->hba_lock);
			return true;
		}
	}
	spin_unlock_bh(&hba->hba_lock);
	return false;
}

static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct bnx2fc_hba *hba;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	hba = unsol_els->hba;
	if (is_valid_lport(hba, lport))
		fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

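/**
 * bnx2fc_process_l2_frame_compl - passes an unsolicited L2 frame up to libfc
 *
 * @tgt: bnx2fc_rport structure pointer
 * @buf: buffer holding the FC frame header plus payload
 * @frame_len: total frame length, in bytes
 * @l2_oxid: OX_ID to stamp into the frame, or FC_XID_UNKNOWN to keep it
 *
 * ELS request/reply frames are handed to fc_exch_recv() from worker context;
 * anything else is dropped.
 */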
void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->hba = interface->hba;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}

static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	u16 xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int rc = 0;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

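		/*
		 * Number of RQ buffers needed to cover the frame; this is
		 * an open-coded DIV_ROUND_UP(frame_len, BNX2FC_RQ_BUF_SZ).
		 */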
		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data) {
			buf = rq_data;
		} else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					     GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > hba->max_xid) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			goto ret_err_rqe;
		}

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_err_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_err_rqe;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					      "progress.. ignore unsol err\n");
			goto ret_err_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
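		/*
		 * Report the lowest-numbered error: find the first set bit
		 * in the 64-bit error/warning bitmap.
		 */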
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & (u64)((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * times out.
		 */
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
			goto ret_err_rqe;
		}
		BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
		if (tgt->dev_type != TYPE_TAPE)
			goto skip_rec;
		switch (err_warn) {
		case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
		case FCOE_ERROR_CODE_DATA_OOO_RO:
		case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
		case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
		case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
		case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
			BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
				   xid);
			memcpy(&io_req->err_entry, err_entry,
			       sizeof(struct fcoe_err_report_entry));
			if (!test_bit(BNX2FC_FLAG_SRR_SENT,
				      &io_req->req_flags)) {
				spin_unlock_bh(&tgt->tgt_lock);
				rc = bnx2fc_send_rec(io_req);
				spin_lock_bh(&tgt->tgt_lock);

				if (rc)
					goto skip_rec;
			} else
				printk(KERN_ERR PFX "SRR in progress\n");
			goto ret_err_rqe;
		default:
			break;
		}

skip_rec:
		set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
		/*
		 * Cancel the timeout_work, as we received IO
		 * completion with FW error.
		 */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount, bnx2fc_cmd_release);

		rc = bnx2fc_initiate_abts(io_req);
		if (rc != SUCCESS) {
			printk(KERN_ERR PFX "err_warn: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				io_req->xid);
			bnx2fc_initiate_cleanup(io_req);
		}
ret_err_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > hba->max_xid) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
			goto ret_warn_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & ((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}
		BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_warn_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_warn_rqe;
		}

		memcpy(&io_req->err_entry, err_entry,
		       sizeof(struct fcoe_err_report_entry));

		if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
			/* REC_TOV is not a warning code */
			BUG_ON(1);
		else
			BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
ret_warn_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}

void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
			     unsigned char *rq_data, u8 num_rq,
			     struct fcoe_task_ctx_entry *task)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_cmd *io_req;

	u16 xid;
	u8 cmd_type;
	u8 rx_state = 0;

	spin_lock_bh(&tgt->tgt_lock);

	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
						      rq_data);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}

		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
			bnx2fc_process_els_compl(io_req, task, num_rq);
		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state = %d\n",
				rx_state);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_SEQ_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
			      io_req->xid);
		bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}

void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
	u32 msg;

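	/*
	 * Make the CQ consumer-index update visible before the doorbell
	 * MMIO write below.
	 */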
	wmb();
	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
			FCOE_CQE_TOGGLE_BIT_SHIFT);
	msg = *((u32 *)rx_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
}

static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
					     unsigned char *rq_data, u8 num_rq,
					     struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_work *work;
	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	work->num_rq = num_rq;
	work->task = task;
	if (rq_data)
		memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);

	return work;
}

/* Pending work request completion */
static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
{
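	/*
	 * Pick a CPU for completion processing by hashing the CQE; the work
	 * is then handed to that CPU's bnx2fc_percpu io thread below.
	 */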
	unsigned int cpu = wqe % num_possible_cpus();
	struct bnx2fc_percpu_s *fps;
	struct bnx2fc_work *work;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	unsigned char *rq_data = NULL;
	unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
	int task_idx, index;
	u16 xid;
	u8 num_rq;
	int i;

	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= hba->max_tasks) {
		pr_err(PFX "ERROR:xid out of range\n");
		return false;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &task_page[index];

	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
		  FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

	memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);

	if (!num_rq)
		goto num_rq_zero;

	rq_data = bnx2fc_get_next_rqe(tgt, 1);

	if (num_rq > 1) {
		/* We do not need extra sense data */
		for (i = 1; i < num_rq; i++)
			bnx2fc_get_next_rqe(tgt, 1);
	}

	if (rq_data)
		memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);

	/* return RQ entries */
	for (i = 0; i < num_rq; i++)
		bnx2fc_return_rqe(tgt, 1);

num_rq_zero:

	fps = &per_cpu(bnx2fc_percpu, cpu);
	spin_lock_bh(&fps->fp_work_lock);
	if (fps->iothread) {
		work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
					 num_rq, task);
		if (work) {
			list_add_tail(&work->list, &fps->work_list);
			wake_up_process(fps->iothread);
			spin_unlock_bh(&fps->fp_work_lock);
			return true;
		}
	}
	spin_unlock_bh(&fps->fp_work_lock);
	bnx2fc_process_cq_compl(tgt, wqe,
				rq_data_buff, num_rq, task);

	return true;
}

int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u32 num_free_sqes = 0;
	u32 num_cqes = 0;
	u16 wqe;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

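	/*
	 * A CQE is valid while its toggle bit matches the driver's current
	 * toggle: the firmware flips the bit each time it wraps the CQ, and
	 * the driver flips cq_curr_toggle_bit on its own wrap below.
	 */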
	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
	       (tgt->cq_curr_toggle_bit <<
	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {

		/* new entry on the cq */
		if (wqe & FCOE_CQE_CQE_TYPE) {
			/* Unsolicited event notification */
			bnx2fc_process_unsol_compl(tgt, wqe);
		} else {
			if (bnx2fc_pending_work(tgt, wqe))
				num_free_sqes++;
		}
		cqe++;
		tgt->cq_cons_idx++;
		num_cqes++;

		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
			tgt->cq_cons_idx = 0;
			cqe = cq;
			tgt->cq_curr_toggle_bit =
				1 - tgt->cq_curr_toggle_bit;
		}
	}
	if (num_cqes) {
		/* Arm CQ only if doorbell is mapped */
		if (tgt->ctx_base)
			bnx2fc_arm_cq(tgt);
		atomic_add(num_free_sqes, &tgt->free_sqes);
	}
	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba: adapter structure pointer
 * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					 struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba: adapter structure pointer
 * @ofld_kcqe: connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
				     struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	struct bnx2fc_interface *interface;
	u32 conn_id;
	u32 context_id;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
	} else {
		/* FW offload request successfully completed */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	}
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba: adapter structure pointer
 * @ofld_kcqe: connection offload kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					    struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	struct bnx2fc_interface *interface;
	u32 conn_id;
	u32 context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ERR PFX "context id mismatch\n");
		return;
	}
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
		goto enbl_cmpl_err;
	}
	if (!ofld_kcqe->completion_status)
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *disable_kcqe)
{

	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
		printk(KERN_ERR PFX "init failure due to compl status err\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}

/**
 * bnx2fc_indicate_kcqe() - process KCQE
 *
 * @context: adapter structure pointer
 * @kcq: kcqe pointer
 * @num_cqe: Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
			  u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	int i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {

				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
		default:
			printk(KERN_ERR PFX "unknown opcode 0x%x\n",
				kcqe->op_code);
		}
	}
}

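/*
 * Queue a task on the connection's send queue. Callers are expected to
 * follow up with bnx2fc_ring_doorbell() so the firmware picks up the new
 * SQ producer index.
 */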
void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	/* Fill SQ WQE */
	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}

void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	u32 msg;

	wmb();
	sq_db->prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	msg = *((u32 *)sq_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
}

int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
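	/*
	 * Each connection owns a (1 << BNX2X_DB_SHIFT)-byte doorbell cell
	 * within the doorbell BAR; only the first 4 bytes are ever written.
	 */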
	reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
	tgt->ctx_base = ioremap(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}

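/*
 * RQ consumer helpers. Callers serialize with tgt->tgt_lock; an
 * illustrative consume/return sequence is:
 *
 *	spin_lock_bh(&tgt->tgt_lock);
 *	rq_data = bnx2fc_get_next_rqe(tgt, 1);
 *	if (rq_data)
 *		... copy out of rq_data ...
 *	bnx2fc_return_rqe(tgt, 1);
 *	spin_unlock_bh(&tgt->tgt_lock);
 */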
char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}

void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
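
	/*
	 * The low 15 bits index the RQ; bit 15 acts as a wrap counter for
	 * the firmware (the producer was seeded to 0x8000 at offload time),
	 * so adjust it whenever the 15-bit index wraps.
	 */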
1480 | if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) { |
1481 | /* Wrap around RQ */ |
1482 | next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX; |
1483 | } |
1484 | tgt->rq_prod_idx = next_prod_idx; |
1485 | tgt->conn_db->rq_prod = tgt->rq_prod_idx; |
1486 | } |
1487 | |
1488 | void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req, |
1489 | struct fcoe_task_ctx_entry *task, |
1490 | struct bnx2fc_cmd *orig_io_req, |
1491 | u32 offset) |
1492 | { |
1493 | struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd; |
1494 | struct bnx2fc_rport *tgt = seq_clnp_req->tgt; |
1495 | struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl; |
1496 | struct fcoe_ext_mul_sges_ctx *sgl; |
1497 | u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP; |
1498 | u8 orig_task_type; |
1499 | u16 orig_xid = orig_io_req->xid; |
1500 | u32 context_id = tgt->context_id; |
1501 | u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma; |
1502 | u32 orig_offset = offset; |
1503 | int bd_count; |
1504 | int i; |
1505 | |
1506 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); |
1507 | |
1508 | if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) |
1509 | orig_task_type = FCOE_TASK_TYPE_WRITE; |
1510 | else |
1511 | orig_task_type = FCOE_TASK_TYPE_READ; |
1512 | |
1513 | /* Tx flags */ |
1514 | task->txwr_rxrd.const_ctx.tx_flags = |
1515 | FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP << |
1516 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; |
1517 | /* init flags */ |
1518 | task->txwr_rxrd.const_ctx.init_flags = task_type << |
1519 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; |
1520 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << |
1521 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; |
1522 | task->rxwr_txrd.const_ctx.init_flags = context_id << |
1523 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; |
1524 | task->rxwr_txrd.const_ctx.init_flags = context_id << |
1525 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; |
1526 | |
1527 | task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; |
1528 | |
1529 | task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0; |
1530 | task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset; |
1531 | |
1532 | bd_count = orig_io_req->bd_tbl->bd_valid; |
1533 | |
1534 | /* obtain the appropriate bd entry from relative offset */ |
1535 | for (i = 0; i < bd_count; i++) { |
1536 | if (offset < bd[i].buf_len) |
1537 | break; |
1538 | offset -= bd[i].buf_len; |
1539 | } |
1540 | phys_addr += (i * sizeof(struct fcoe_bd_ctx)); |
1541 | |
1542 | if (orig_task_type == FCOE_TASK_TYPE_WRITE) { |
1543 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = |
1544 | (u32)phys_addr; |
1545 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = |
1546 | (u32)((u64)phys_addr >> 32); |
1547 | task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = |
1548 | bd_count; |
1549 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off = |
1550 | offset; /* adjusted offset */ |
1551 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i; |
1552 | } else { |
1553 | |
1554 | /* Multiple SGEs were used for this IO */ |
1555 | sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; |
1556 | sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr; |
1557 | sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32); |
1558 | sgl->mul_sgl.sgl_size = bd_count; |
1559 | sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */ |
1560 | sgl->mul_sgl.cur_sge_idx = i; |
1561 | |
1562 | memset(&task->rxwr_only.rx_seq_ctx, 0, |
1563 | sizeof(struct fcoe_rx_seq_ctx)); |
1564 | task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset; |
1565 | task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset; |
1566 | } |
1567 | } |
1568 | void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, |
1569 | struct fcoe_task_ctx_entry *task, |
1570 | u16 orig_xid) |
1571 | { |
1572 | u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP; |
1573 | struct bnx2fc_rport *tgt = io_req->tgt; |
1574 | u32 context_id = tgt->context_id; |
1575 | |
1576 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); |
1577 | |
1578 | /* Tx Write Rx Read */ |
1579 | /* init flags */ |
1580 | task->txwr_rxrd.const_ctx.init_flags = task_type << |
1581 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; |
1582 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << |
1583 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; |
1584 | if (tgt->dev_type == TYPE_TAPE) |
1585 | task->txwr_rxrd.const_ctx.init_flags |= |
1586 | FCOE_TASK_DEV_TYPE_TAPE << |
1587 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; |
1588 | else |
1589 | task->txwr_rxrd.const_ctx.init_flags |= |
1590 | FCOE_TASK_DEV_TYPE_DISK << |
1591 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; |
1592 | task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; |
1593 | |
1594 | /* Tx flags */ |
1595 | task->txwr_rxrd.const_ctx.tx_flags = |
1596 | FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << |
1597 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; |
1598 | |
1599 | /* Rx Read Tx Write */ |
1600 | task->rxwr_txrd.const_ctx.init_flags = context_id << |
1601 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; |
1602 | task->rxwr_txrd.var_ctx.rx_flags |= 1 << |
1603 | FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; |
1604 | } |
1605 | |
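/**
 * bnx2fc_init_mp_task - initialize a middle path (TM/ELS) task context
 *
 * @io_req: middle path request
 * @task: task context entry to initialize
 */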
1606 | void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, |
1607 | struct fcoe_task_ctx_entry *task) |
1608 | { |
1609 | struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); |
1610 | struct bnx2fc_rport *tgt = io_req->tgt; |
1611 | struct fc_frame_header *fc_hdr; |
1612 | struct fcoe_ext_mul_sges_ctx *sgl; |
1613 | u8 task_type = 0; |
1614 | u64 *hdr; |
1615 | u64 temp_hdr[3]; |
1616 | u32 context_id; |
1619 | /* Obtain task_type */ |
1620 | if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) || |
1621 | (io_req->cmd_type == BNX2FC_ELS)) { |
1622 | task_type = FCOE_TASK_TYPE_MIDPATH; |
1623 | } else if (io_req->cmd_type == BNX2FC_ABTS) { |
1624 | task_type = FCOE_TASK_TYPE_ABTS; |
1625 | } |
1626 | |
1627 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); |
1628 | |
1629 | /* Setup the task from io_req for easy reference */ |
1630 | io_req->task = task; |
1631 | |
	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		      io_req->cmd_type, task_type);
1634 | |
1635 | /* Tx only */ |
1636 | if ((task_type == FCOE_TASK_TYPE_MIDPATH) || |
1637 | (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { |
1638 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = |
1639 | (u32)mp_req->mp_req_bd_dma; |
1640 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = |
1641 | (u32)((u64)mp_req->mp_req_bd_dma >> 32); |
1642 | task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1; |
1643 | } |
1644 | |
1645 | /* Tx Write Rx Read */ |
1646 | /* init flags */ |
1647 | task->txwr_rxrd.const_ctx.init_flags = task_type << |
1648 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; |
1649 | if (tgt->dev_type == TYPE_TAPE) |
1650 | task->txwr_rxrd.const_ctx.init_flags |= |
1651 | FCOE_TASK_DEV_TYPE_TAPE << |
1652 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; |
1653 | else |
1654 | task->txwr_rxrd.const_ctx.init_flags |= |
1655 | FCOE_TASK_DEV_TYPE_DISK << |
1656 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; |
1657 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << |
1658 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; |
1659 | |
1660 | /* tx flags */ |
1661 | task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT << |
1662 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; |
1663 | |
1664 | /* Rx Write Tx Read */ |
1665 | task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; |
1666 | |
1667 | /* rx flags */ |
1668 | task->rxwr_txrd.var_ctx.rx_flags |= 1 << |
1669 | FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; |
1670 | |
1671 | context_id = tgt->context_id; |
1672 | task->rxwr_txrd.const_ctx.init_flags = context_id << |
1673 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; |
1674 | |
1675 | fc_hdr = &(mp_req->req_fc_hdr); |
1676 | if (task_type == FCOE_TASK_TYPE_MIDPATH) { |
1677 | fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); |
1678 | fc_hdr->fh_rx_id = htons(0xffff); |
1679 | task->rxwr_txrd.var_ctx.rx_id = 0xffff; |
1680 | } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { |
1681 | fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); |
1682 | } |
1683 | |
1684 | /* Fill FC Header into middle path buffer */ |
1685 | hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr; |
1686 | memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); |
1687 | hdr[0] = cpu_to_be64(temp_hdr[0]); |
1688 | hdr[1] = cpu_to_be64(temp_hdr[1]); |
1689 | hdr[2] = cpu_to_be64(temp_hdr[2]); |
1690 | |
1691 | /* Rx Only */ |
1692 | if (task_type == FCOE_TASK_TYPE_MIDPATH) { |
1693 | sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; |
1694 | |
1695 | sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma; |
1696 | sgl->mul_sgl.cur_sge_addr.hi = |
1697 | (u32)((u64)mp_req->mp_resp_bd_dma >> 32); |
1698 | sgl->mul_sgl.sgl_size = 1; |
1699 | } |
1700 | } |
1701 | |
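/**
 * bnx2fc_init_task - initialize a SCSI read/write task context
 *
 * @io_req: SCSI I/O request
 * @task: task context entry to initialize
 */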
1702 | void bnx2fc_init_task(struct bnx2fc_cmd *io_req, |
1703 | struct fcoe_task_ctx_entry *task) |
1704 | { |
1705 | u8 task_type; |
1706 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; |
1707 | struct io_bdt *bd_tbl = io_req->bd_tbl; |
1708 | struct bnx2fc_rport *tgt = io_req->tgt; |
1709 | struct fcoe_cached_sge_ctx *cached_sge; |
1710 | struct fcoe_ext_mul_sges_ctx *sgl; |
1711 | int dev_type = tgt->dev_type; |
1712 | struct fcp_cmnd *fcp_cmnd; |
1713 | u64 *raw_fcp_cmnd; |
1714 | u64 tmp_fcp_cmnd[4]; |
1715 | u32 context_id; |
1716 | int cnt, i; |
1717 | int bd_count; |
1718 | |
1719 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); |
1720 | |
1721 | /* Setup the task from io_req for easy reference */ |
1722 | io_req->task = task; |
1723 | |
1724 | if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) |
1725 | task_type = FCOE_TASK_TYPE_WRITE; |
1726 | else |
1727 | task_type = FCOE_TASK_TYPE_READ; |
1728 | |
1729 | /* Tx only */ |
1730 | bd_count = bd_tbl->bd_valid; |
1731 | cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; |
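	/*
	 * A single-BD disk I/O can use the cached SGE embedded in the
	 * task context, sparing the firmware an SGL fetch from host
	 * memory; all other cases use the multiple-SGE list.
	 */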
1732 | if (task_type == FCOE_TASK_TYPE_WRITE) { |
1733 | if ((dev_type == TYPE_DISK) && (bd_count == 1)) { |
1734 | struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; |
1735 | |
1736 | task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo = |
1737 | cached_sge->cur_buf_addr.lo = |
1738 | fcoe_bd_tbl->buf_addr_lo; |
1739 | task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi = |
1740 | cached_sge->cur_buf_addr.hi = |
1741 | fcoe_bd_tbl->buf_addr_hi; |
1742 | task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem = |
1743 | cached_sge->cur_buf_rem = |
1744 | fcoe_bd_tbl->buf_len; |
1745 | |
1746 | task->txwr_rxrd.const_ctx.init_flags |= 1 << |
1747 | FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; |
1748 | } else { |
1749 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = |
1750 | (u32)bd_tbl->bd_tbl_dma; |
1751 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = |
1752 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); |
1753 | task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = |
1754 | bd_tbl->bd_valid; |
1755 | } |
1756 | } |
1757 | |
1758 | /*Tx Write Rx Read */ |
1759 | /* Init state to NORMAL */ |
1760 | task->txwr_rxrd.const_ctx.init_flags |= task_type << |
1761 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; |
1762 | if (dev_type == TYPE_TAPE) { |
1763 | task->txwr_rxrd.const_ctx.init_flags |= |
1764 | FCOE_TASK_DEV_TYPE_TAPE << |
1765 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; |
		io_req->rec_retry = 0;
1768 | } else |
1769 | task->txwr_rxrd.const_ctx.init_flags |= |
1770 | FCOE_TASK_DEV_TYPE_DISK << |
1771 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; |
1772 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << |
1773 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; |
1774 | /* tx flags */ |
1775 | task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << |
1776 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; |
1777 | |
1778 | /* Set initial seq counter */ |
1779 | task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1; |
1780 | |
1781 | /* Fill FCP_CMND IU */ |
1782 | fcp_cmnd = (struct fcp_cmnd *)&tmp_fcp_cmnd; |
1783 | bnx2fc_build_fcp_cmnd(io_req, fcp_cmnd); |
1784 | int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun); |
1785 | memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); |
1786 | raw_fcp_cmnd = (u64 *) |
1787 | task->txwr_rxrd.union_ctx.fcp_cmd.opaque; |
1788 | |
	/* swap fcp_cmnd to the big-endian layout the firmware expects */
1790 | cnt = sizeof(struct fcp_cmnd) / sizeof(u64); |
1791 | |
1792 | for (i = 0; i < cnt; i++) { |
1793 | *raw_fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]); |
1794 | raw_fcp_cmnd++; |
1795 | } |
1796 | |
1797 | /* Rx Write Tx Read */ |
1798 | task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; |
1799 | |
1800 | context_id = tgt->context_id; |
1801 | task->rxwr_txrd.const_ctx.init_flags = context_id << |
1802 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; |
1803 | |
1804 | /* rx flags */ |
1805 | /* Set state to "waiting for the first packet" */ |
1806 | task->rxwr_txrd.var_ctx.rx_flags |= 1 << |
1807 | FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; |
1808 | |
1809 | task->rxwr_txrd.var_ctx.rx_id = 0xffff; |
1810 | |
1811 | /* Rx Only */ |
1812 | if (task_type != FCOE_TASK_TYPE_READ) |
1813 | return; |
1814 | |
1815 | sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; |
1816 | bd_count = bd_tbl->bd_valid; |
1817 | |
1818 | if (dev_type == TYPE_DISK) { |
1819 | if (bd_count == 1) { |
1820 | |
1821 | struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; |
1822 | |
1823 | cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; |
1824 | cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; |
1825 | cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; |
1826 | task->txwr_rxrd.const_ctx.init_flags |= 1 << |
1827 | FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; |
1828 | } else if (bd_count == 2) { |
1829 | struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; |
1830 | |
1831 | cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; |
1832 | cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; |
1833 | cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; |
1834 | |
1835 | fcoe_bd_tbl++; |
1836 | cached_sge->second_buf_addr.lo = |
1837 | fcoe_bd_tbl->buf_addr_lo; |
1838 | cached_sge->second_buf_addr.hi = |
1839 | fcoe_bd_tbl->buf_addr_hi; |
1840 | cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len; |
1841 | task->txwr_rxrd.const_ctx.init_flags |= 1 << |
1842 | FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; |
1843 | } else { |
1844 | |
1845 | sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; |
1846 | sgl->mul_sgl.cur_sge_addr.hi = |
1847 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); |
1848 | sgl->mul_sgl.sgl_size = bd_count; |
1849 | } |
1850 | } else { |
1851 | sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; |
1852 | sgl->mul_sgl.cur_sge_addr.hi = |
1853 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); |
1854 | sgl->mul_sgl.sgl_size = bd_count; |
1855 | } |
1856 | } |
1857 | |
1858 | /** |
1859 | * bnx2fc_setup_task_ctx - allocate and map task context |
1860 | * |
1861 | * @hba: pointer to adapter structure |
1862 | * |
 * Allocate memory for the task contexts and the associated BD table to
 * be used by the firmware.
1865 | * |
1866 | */ |
1867 | int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) |
1868 | { |
1869 | int rc = 0; |
1870 | struct regpair *task_ctx_bdt; |
1871 | dma_addr_t addr; |
1872 | int task_ctx_arr_sz; |
1873 | int i; |
1874 | |
1875 | /* |
1876 | * Allocate task context bd table. A page size of bd table |
1877 | * can map 256 buffers. Each buffer contains 32 task context |
1878 | * entries. Hence the limit with one page is 8192 task context |
1879 | * entries. |
1880 | */ |
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
1885 | if (!hba->task_ctx_bd_tbl) { |
1886 | printk(KERN_ERR PFX "unable to allocate task context BDT\n" ); |
1887 | rc = -1; |
1888 | goto out; |
1889 | } |
1890 | |
1891 | /* |
1892 | * Allocate task_ctx which is an array of pointers pointing to |
1893 | * a page containing 32 task contexts |
1894 | */ |
1895 | task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); |
	hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
				 GFP_KERNEL);
1898 | if (!hba->task_ctx) { |
1899 | printk(KERN_ERR PFX "unable to allocate task context array\n" ); |
1900 | rc = -1; |
1901 | goto out1; |
1902 | } |
1903 | |
1904 | /* |
1905 | * Allocate task_ctx_dma which is an array of dma addresses |
1906 | */ |
	hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
				     sizeof(dma_addr_t)), GFP_KERNEL);
1909 | if (!hba->task_ctx_dma) { |
1910 | printk(KERN_ERR PFX "unable to alloc context mapping array\n" ); |
1911 | rc = -1; |
1912 | goto out2; |
1913 | } |
1914 | |
1915 | task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; |
1916 | for (i = 0; i < task_ctx_arr_sz; i++) { |
1917 | |
		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
1922 | if (!hba->task_ctx[i]) { |
1923 | printk(KERN_ERR PFX "unable to alloc task context\n" ); |
1924 | rc = -1; |
1925 | goto out3; |
1926 | } |
1927 | addr = (u64)hba->task_ctx_dma[i]; |
1928 | task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32); |
1929 | task_ctx_bdt->lo = cpu_to_le32((u32)addr); |
1930 | task_ctx_bdt++; |
1931 | } |
1932 | return 0; |
1933 | |
1934 | out3: |
1935 | for (i = 0; i < task_ctx_arr_sz; i++) { |
1936 | if (hba->task_ctx[i]) { |
1937 | |
			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
					  hba->task_ctx[i], hba->task_ctx_dma[i]);
1940 | hba->task_ctx[i] = NULL; |
1941 | } |
1942 | } |
1943 | |
	kfree(hba->task_ctx_dma);
1945 | hba->task_ctx_dma = NULL; |
1946 | out2: |
	kfree(hba->task_ctx);
1948 | hba->task_ctx = NULL; |
1949 | out1: |
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1952 | hba->task_ctx_bd_tbl = NULL; |
1953 | out: |
1954 | return rc; |
1955 | } |
1956 | |
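/**
 * bnx2fc_free_task_ctx - free task context memory and its BD table
 *
 * @hba: pointer to adapter structure
 */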
1957 | void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) |
1958 | { |
1959 | int task_ctx_arr_sz; |
1960 | int i; |
1961 | |
1962 | if (hba->task_ctx_bd_tbl) { |
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->task_ctx_bd_tbl,
				  hba->task_ctx_bd_dma);
1966 | hba->task_ctx_bd_tbl = NULL; |
1967 | } |
1968 | |
1969 | task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); |
1970 | if (hba->task_ctx) { |
1971 | for (i = 0; i < task_ctx_arr_sz; i++) { |
1972 | if (hba->task_ctx[i]) { |
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						  hba->task_ctx[i],
						  hba->task_ctx_dma[i]);
1976 | hba->task_ctx[i] = NULL; |
1977 | } |
1978 | } |
		kfree(hba->task_ctx);
1980 | hba->task_ctx = NULL; |
1981 | } |
1982 | |
	kfree(hba->task_ctx_dma);
1984 | hba->task_ctx_dma = NULL; |
1985 | } |
1986 | |
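/* Free the hash table segments and the PBL that describes them */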
1987 | static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) |
1988 | { |
1989 | int i; |
1990 | int segment_count; |
1991 | u32 *pbl; |
1992 | |
1993 | if (hba->hash_tbl_segments) { |
1994 | |
1995 | pbl = hba->hash_tbl_pbl; |
1996 | if (pbl) { |
1997 | segment_count = hba->hash_tbl_segment_count; |
1998 | for (i = 0; i < segment_count; ++i) { |
1999 | dma_addr_t dma_address; |
2000 | |
2001 | dma_address = le32_to_cpu(*pbl); |
2002 | ++pbl; |
2003 | dma_address += ((u64)le32_to_cpu(*pbl)) << 32; |
2004 | ++pbl; |
				dma_free_coherent(&hba->pcidev->dev,
						  BNX2FC_HASH_TBL_CHUNK_SIZE,
						  hba->hash_tbl_segments[i],
						  dma_address);
2009 | } |
2010 | } |
2011 | |
		kfree(hba->hash_tbl_segments);
2013 | hba->hash_tbl_segments = NULL; |
2014 | } |
2015 | |
2016 | if (hba->hash_tbl_pbl) { |
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->hash_tbl_pbl,
				  hba->hash_tbl_pbl_dma);
2020 | hba->hash_tbl_pbl = NULL; |
2021 | } |
2022 | } |
2023 | |
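/*
 * Allocate the session hash table in BNX2FC_HASH_TBL_CHUNK_SIZE segments
 * and publish their DMA addresses to the firmware via a page base list
 * (PBL).
 */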
2024 | static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) |
2025 | { |
2026 | int i; |
2027 | int hash_table_size; |
2028 | int segment_count; |
2029 | int segment_array_size; |
2030 | int dma_segment_array_size; |
2031 | dma_addr_t *dma_segment_array; |
2032 | u32 *pbl; |
2033 | |
2034 | hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * |
2035 | sizeof(struct fcoe_hash_table_entry); |
2036 | |
2037 | segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1; |
2038 | segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE; |
2039 | hba->hash_tbl_segment_count = segment_count; |
2040 | |
2041 | segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments); |
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
2045 | return -ENOMEM; |
2046 | } |
2047 | dma_segment_array_size = segment_count * sizeof(*dma_segment_array); |
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
2051 | goto cleanup_ht; |
2052 | } |
2053 | |
2054 | for (i = 0; i < segment_count; ++i) { |
		hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
							       BNX2FC_HASH_TBL_CHUNK_SIZE,
							       &dma_segment_array[i],
							       GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
2061 | goto cleanup_dma; |
2062 | } |
2063 | } |
2064 | |
	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2070 | goto cleanup_dma; |
2071 | } |
2072 | |
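	/* Record each segment's DMA address in the PBL, low dword first */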
2073 | pbl = hba->hash_tbl_pbl; |
2074 | for (i = 0; i < segment_count; ++i) { |
2075 | u64 paddr = dma_segment_array[i]; |
2076 | *pbl = cpu_to_le32((u32) paddr); |
2077 | ++pbl; |
2078 | *pbl = cpu_to_le32((u32) (paddr >> 32)); |
2079 | ++pbl; |
2080 | } |
	kfree(dma_segment_array);
2089 | return 0; |
2090 | |
2091 | cleanup_dma: |
2092 | for (i = 0; i < segment_count; ++i) { |
2093 | if (hba->hash_tbl_segments[i]) |
			dma_free_coherent(&hba->pcidev->dev,
					  BNX2FC_HASH_TBL_CHUNK_SIZE,
					  hba->hash_tbl_segments[i],
					  dma_segment_array[i]);
2098 | } |
2099 | |
	kfree(dma_segment_array);
2101 | |
2102 | cleanup_ht: |
	kfree(hba->hash_tbl_segments);
2104 | hba->hash_tbl_segments = NULL; |
2105 | return -ENOMEM; |
2106 | } |
2107 | |
2108 | /** |
 * bnx2fc_setup_fw_resc - Allocate and map hash tables, dummy and stats buffers
2110 | * |
2111 | * @hba: Pointer to adapter structure |
2112 | * |
2113 | */ |
2114 | int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) |
2115 | { |
2116 | u64 addr; |
2117 | u32 mem_size; |
2118 | int i; |
2119 | |
2120 | if (bnx2fc_allocate_hash_table(hba)) |
2121 | return -ENOMEM; |
2122 | |
2123 | mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); |
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2129 | bnx2fc_free_fw_resc(hba); |
2130 | return -ENOMEM; |
2131 | } |
2132 | |
2133 | mem_size = BNX2FC_NUM_MAX_SESS * |
2134 | sizeof(struct fcoe_t2_hash_table_entry); |
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2140 | bnx2fc_free_fw_resc(hba); |
2141 | return -ENOMEM; |
2142 | } |
2143 | for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { |
		addr = (u64) hba->t2_hash_tbl_dma +
			((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
2146 | hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff; |
2147 | hba->t2_hash_tbl[i].next.hi = addr >> 32; |
2148 | } |
2149 | |
	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
2155 | bnx2fc_free_fw_resc(hba); |
2156 | return -ENOMEM; |
2157 | } |
2158 | |
	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2164 | bnx2fc_free_fw_resc(hba); |
2165 | return -ENOMEM; |
2166 | } |
2167 | |
2168 | return 0; |
2169 | } |
2170 | |
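/**
 * bnx2fc_free_fw_resc - free hash tables, dummy buffer and stats buffer
 *
 * @hba: pointer to adapter structure
 */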
2171 | void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba) |
2172 | { |
2173 | u32 mem_size; |
2174 | |
2175 | if (hba->stats_buffer) { |
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
2178 | hba->stats_buffer = NULL; |
2179 | } |
2180 | |
2181 | if (hba->dummy_buffer) { |
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
2184 | hba->dummy_buffer = NULL; |
2185 | } |
2186 | |
2187 | if (hba->t2_hash_tbl_ptr) { |
2188 | mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); |
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl_ptr,
				  hba->t2_hash_tbl_ptr_dma);
2192 | hba->t2_hash_tbl_ptr = NULL; |
2193 | } |
2194 | |
2195 | if (hba->t2_hash_tbl) { |
2196 | mem_size = BNX2FC_NUM_MAX_SESS * |
2197 | sizeof(struct fcoe_t2_hash_table_entry); |
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
2200 | hba->t2_hash_tbl = NULL; |
2201 | } |
2202 | bnx2fc_free_hash_table(hba); |
2203 | } |
2204 | |