// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

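/*
 * qla24xx_allocate_vp_id
 *	Find a free slot in the vp_idx_map bitmap, claim it as this vport's
 *	vp_id and link the vport onto the adapter's vp_list.
 *
 * Input:
 *	vha = virtual port block pointer.
 *
 * Return:
 *	Allocated vp_id; a value greater than ha->max_npiv_vports means
 *	no slot was available.
 */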
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

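/*
 * qla24xx_deallocate_vp_id
 *	Unlink the vport from the adapter's vp_list and release its vp_id.
 *	Polls vref_count for up to 500 * 20ms before forcing the removal
 *	so in-flight references can drain first.
 */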
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u32 i, bailout;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * Lock needs to be held for safe removal from the list (it ensures
	 * no active vp_list traversal while the vport is removed from the
	 * queue).
	 */
	bailout = 0;
	for (i = 0; i < 500; i++) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		if (atomic_read(&vha->vref_count) == 0) {
			list_del(&vha->list);
			qla_update_vp_map(vha, RESET_VP_IDX);
			bailout = 1;
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (bailout)
			break;
		else
			msleep(20);
	}
	if (!bailout) {
		ql_log(ql_log_info, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_del(&vha->list);
		qla_update_vp_map(vha, RESET_VP_IDX);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	}

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from a context other than vp create,
	 * disable or delete, make sure it is synchronized with the delete
	 * thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

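/*
 * qla24xx_disable_vp
 *	Disable a virtual port: ask the firmware to log out the vport,
 *	take the loop down, remove its port id from the vp target map and
 *	mark all of its fcports dead.
 *
 * Return:
 *	0 on success, -1 if the firmware disable command failed.
 */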
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.edif_enabled) {
		if (DBELL_ACTIVE(vha))
			qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
			    FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);
		/* delete sessions and flush sa_indexes */
		qla2x00_wait_for_sess_deletion(vha);
	}

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	if (!vha->hw->flags.edif_enabled)
		qla2x00_wait_for_sess_deletion(vha);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qla_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

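/*
 * qla24xx_enable_vp
 *	Enable a virtual port after checking that the physical port is up
 *	and the current topology is fabric (ISP_CFG_F).
 *
 * Return:
 *	0 on success, 1 on failure.
 */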
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

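/*
 * qla24xx_configure_vp
 *	Complete vport bring-up: enable RSCN reception via change request
 *	#3, configure the vhba and move the vport to the ACTIVE state.
 */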
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003,
		    "Failed to enable receiving of RSCN requests: 0x%x.\n",
		    ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

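/*
 * qla2x00_alert_all_vps
 *	Forward an asynchronous event to every virtual port. vref_count
 *	is held across each callout so the vport cannot be deleted while
 *	vport_slock is dropped for the event handler.
 */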
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha, *tvp;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
		if (vha->vp_idx) {
			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
				continue;

			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

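/*
 * qla2x00_vp_abort_isp
 *	Vport-level ISP abort handling: log the vport out and treat the
 *	abort as a loop down; the physical port does the actual abort and
 *	recovery work. Finishes by re-enabling the vport.
 */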
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/*
	 * To exclusively reset vport, we need to log it out first.
	 * Note: This control_vp can fail if ISP reset is already
	 * issued, this is expected, as the vp would be already
	 * logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->logout_on_delete = 0;
	}

	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);

	return qla24xx_enable_vp(vha);
}

static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
		if (atomic_read(&vha->loop_state) == LOOP_READY) {
			qla24xx_process_purex_list(&vha->purex_list);
			clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
		}
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp, *tvp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

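/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate a vport create request: FCP initiator role only, NPIV
 *	support in F/W and H/W, an NPIV-capable switch, a unique WWPN and
 *	a free vport slot.
 *
 * Return:
 *	0 on success, otherwise a VPCERR_* code.
 */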
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max NPIV vport limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

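/*
 * qla24xx_create_vhost
 *	Allocate and initialize the scsi_qla_host for a new vport: assign
 *	a vp_id, seed the dpc flags, start the watchdog timer and inherit
 *	queue and NVMe settings from the base port.
 *
 * Return:
 *	Pointer to the new vha, or NULL on failure.
 */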
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	const struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;
	ha->dpc_active = 0;
	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);

	/*
	 * To fix the issue of processing a parent's RSCN for the vport
	 * before its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}

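/*
 * qla25xx_create_req_que
 *	Allocate a new request queue: DMA ring, outstanding-command array
 *	and a free queue id; when @startqp is set the queue is also
 *	initialized in the firmware.
 *
 * Return:
 *	New queue id on success, 0 on failure.
 */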
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (uint16_t *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha = qpair->vha;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
858 | fmt: "No resources to create additional request queue.\n" ); |
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSI-X handshake mode for adapters that lack NACK support */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    ha->flags.disable_msix_handshake ?
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}

static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
	if (sp->comp)
		complete(sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable a virtual port for a given host
 * @vha: adapter block pointer
 * @cmd: command type to be sent for enable virtual port
 *
 * Return: qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	/* ref: INIT */
	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		return rval;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->comp = &comp;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla_ctrlvp_sp_done);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&comp);
	sp->comp = NULL;

	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}
done:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	return rval;
}

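/*
 * qla_find_host_by_vp_idx
 *	Return the scsi_qla_host owning @vp_idx: the passed-in vha if the
 *	index matches, otherwise the vp_map entry. Returns NULL when the
 *	index is not currently allocated.
 */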
struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->vp_map[vp_idx].vha;

	return NULL;
}

/* vport_slock to be held by the caller */
void
qla_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	if (!vha->hw->vp_map)
		return;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_disc, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->host_map,
			    key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_disc, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->host_map, key, vha);
		break;
	case RESET_VP_IDX:
		vha->hw->vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_disc, vha, 0xf01a,
		    "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (slot)
			btree_remove32(&vha->hw->host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}

void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	if (!vha->d_id.b24) {
		vha->d_id = id;
		qla_update_vp_map(vha, SET_AL_PA);
	} else if (vha->d_id.b24 != id.b24) {
		qla_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qla_update_vp_map(vha, SET_AL_PA);
	}
}

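/*
 * qla_create_buf_pool
 *	Set up the per-queue-pair FCP command buffer pool: a tag bitmap
 *	sized to the request queue length plus lazily populated buffer and
 *	DMA address arrays. Tag 0 is reserved and never handed out.
 */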
int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp)
{
	int sz;

	qp->buf_pool.num_bufs = qp->req->length;

	sz = BITS_TO_LONGS(qp->req->length);
	qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL);
	if (!qp->buf_pool.buf_map) {
		ql_log(ql_log_warn, vha, 0x0186,
		    "Failed to allocate buf_map(%zd).\n",
		    sz * sizeof(unsigned long));
		return -ENOMEM;
	}
	sz = qp->req->length * sizeof(void *);
	qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *),
	    GFP_KERNEL);
	if (!qp->buf_pool.buf_array) {
		ql_log(ql_log_warn, vha, 0x0186,
		    "Failed to allocate buf_array(%d).\n", sz);
		kfree(qp->buf_pool.buf_map);
		return -ENOMEM;
	}
	sz = qp->req->length * sizeof(dma_addr_t);
	qp->buf_pool.dma_array = kcalloc(qp->req->length, sizeof(dma_addr_t),
	    GFP_KERNEL);
	if (!qp->buf_pool.dma_array) {
		ql_log(ql_log_warn, vha, 0x0186,
		    "Failed to allocate dma_array(%d).\n", sz);
		kfree(qp->buf_pool.buf_map);
		kfree(qp->buf_pool.buf_array);
		return -ENOMEM;
	}
	set_bit(0, qp->buf_pool.buf_map);
	return 0;
}

void qla_free_buf_pool(struct qla_qpair *qp)
{
	int i;
	struct qla_hw_data *ha = qp->vha->hw;

	for (i = 0; i < qp->buf_pool.num_bufs; i++) {
		if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i])
			dma_pool_free(ha->fcp_cmnd_dma_pool,
			    qp->buf_pool.buf_array[i],
			    qp->buf_pool.dma_array[i]);
		qp->buf_pool.buf_array[i] = NULL;
		qp->buf_pool.dma_array[i] = 0;
	}

	kfree(qp->buf_pool.dma_array);
	kfree(qp->buf_pool.buf_array);
	kfree(qp->buf_pool.buf_map);
}

/* It is assumed qp->qp_lock is held at this point. */
int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc)
{
	u16 tag, i = 0;
	void *buf;
	dma_addr_t buf_dma;
	struct qla_hw_data *ha = vha->hw;

	dsc->tag = TAG_FREED;
again:
	tag = find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs);
	if (tag >= qp->buf_pool.num_bufs) {
		ql_dbg(ql_dbg_io, vha, 0x00e2,
		    "qp(%d) ran out of buf resource.\n", qp->id);
		return -EIO;
	}
	if (tag == 0) {
		set_bit(0, qp->buf_pool.buf_map);
		i++;
		if (i == 5) {
			ql_dbg(ql_dbg_io, vha, 0x00e3,
			    "qp(%d) unable to get tag.\n", qp->id);
			return -EIO;
		}
		goto again;
	}

	if (!qp->buf_pool.buf_array[tag]) {
		buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma);
		if (!buf) {
			ql_log(ql_log_fatal, vha, 0x13b1,
			    "Failed to allocate buf.\n");
			return -ENOMEM;
		}

		dsc->buf = qp->buf_pool.buf_array[tag] = buf;
		dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma;
		qp->buf_pool.num_alloc++;
	} else {
		dsc->buf = qp->buf_pool.buf_array[tag];
		dsc->buf_dma = qp->buf_pool.dma_array[tag];
		memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE);
	}

	qp->buf_pool.num_active++;
	if (qp->buf_pool.num_active > qp->buf_pool.max_used)
		qp->buf_pool.max_used = qp->buf_pool.num_active;

	dsc->tag = tag;
	set_bit(tag, qp->buf_pool.buf_map);
	return 0;
}

static void qla_trim_buf(struct qla_qpair *qp, u16 trim)
{
	int i, j;
	struct qla_hw_data *ha = qp->vha->hw;

	if (!trim)
		return;

	for (i = 0; i < trim; i++) {
		j = qp->buf_pool.num_alloc - 1;
		if (test_bit(j, qp->buf_pool.buf_map)) {
			ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b,
			    "QP id(%d): trim active buf[%d]. Remain %d bufs\n",
			    qp->id, j, qp->buf_pool.num_alloc);
			return;
		}

		if (qp->buf_pool.buf_array[j]) {
			dma_pool_free(ha->fcp_cmnd_dma_pool,
			    qp->buf_pool.buf_array[j],
			    qp->buf_pool.dma_array[j]);
			qp->buf_pool.buf_array[j] = NULL;
			qp->buf_pool.dma_array[j] = 0;
		}
		qp->buf_pool.num_alloc--;
		if (!qp->buf_pool.num_alloc)
			break;
	}
	ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010,
	    "QP id(%d): trimmed %d bufs. Remain %d bufs\n",
	    qp->id, trim, qp->buf_pool.num_alloc);
}

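/*
 * __qla_adjust_buf
 *	Periodic pool sizing: compare the previous period's high-water
 *	mark with the current one and, on a downward trend, trim roughly
 *	10% of the surplus buffers (at least one); after two idle periods
 *	the whole pool is freed.
 */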
static void __qla_adjust_buf(struct qla_qpair *qp)
{
	u32 trim;

	qp->buf_pool.take_snapshot = 0;
	qp->buf_pool.prev_max = qp->buf_pool.max_used;
	qp->buf_pool.max_used = qp->buf_pool.num_active;

	if (qp->buf_pool.prev_max > qp->buf_pool.max_used &&
	    qp->buf_pool.num_alloc > qp->buf_pool.max_used) {
		/* down trend */
		trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used;
		trim = (trim * 10) / 100;
		trim = trim ? trim : 1;
		qla_trim_buf(qp, trim);
	} else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) {
		/* 2 periods of no io */
		qla_trim_buf(qp, qp->buf_pool.num_alloc);
	}
}

/* It is assumed qp->qp_lock is held at this point. */
void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc)
{
	if (dsc->tag == TAG_FREED)
		return;
	lockdep_assert_held(qp->qp_lock_ptr);

	clear_bit(dsc->tag, qp->buf_pool.buf_map);
	qp->buf_pool.num_active--;
	dsc->tag = TAG_FREED;

	if (qp->buf_pool.take_snapshot)
		__qla_adjust_buf(qp);
}

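/*
 * qla_adjust_buf
 *	Rate-limited (one pass per EXPIRE interval, base port only) pool
 *	maintenance: if no I/O cleared take_snapshot since the last pass,
 *	shrink the queue pair's pool via __qla_adjust_buf; otherwise arm
 *	take_snapshot for the next interval.
 */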
#define EXPIRE	(60 * HZ)
void qla_adjust_buf(struct scsi_qla_host *vha)
{
	unsigned long flags;
	int i;
	struct qla_qpair *qp;

	if (vha->vp_idx)
		return;

	if (!vha->buf_expired) {
		vha->buf_expired = jiffies + EXPIRE;
		return;
	}
	if (time_before(jiffies, vha->buf_expired))
		return;

	vha->buf_expired = jiffies + EXPIRE;

	for (i = 0; i < vha->hw->num_qpairs; i++) {
		qp = vha->hw->queue_pair_map[i];
		if (!qp)
			continue;
		if (!qp->buf_pool.num_alloc)
			continue;

		if (qp->buf_pool.take_snapshot) {
			/* no I/O has gone through in the last EXPIRE period */
			spin_lock_irqsave(qp->qp_lock_ptr, flags);
			__qla_adjust_buf(qp);
			spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
		} else {
			qp->buf_pool.take_snapshot = 1;
		}
	}
}