1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * QLogic iSCSI Offload Driver |
4 | * Copyright (c) 2016 Cavium Inc. |
5 | */ |
6 | |
7 | #ifndef _QEDI_H_ |
8 | #define _QEDI_H_ |
9 | |
10 | #define __PREVENT_QED_HSI__ |
11 | |
12 | #include <scsi/scsi_transport_iscsi.h> |
13 | #include <scsi/libiscsi.h> |
14 | #include <scsi/scsi_host.h> |
15 | #include <linux/uio_driver.h> |
16 | |
17 | #include "qedi_hsi.h" |
18 | #include <linux/qed/qed_if.h> |
19 | #include "qedi_dbg.h" |
20 | #include <linux/qed/qed_iscsi_if.h> |
21 | #include <linux/qed/qed_ll2_if.h> |
22 | #include "qedi_version.h" |
23 | #include "qedi_nvm_iscsi_cfg.h" |
24 | |
25 | #define QEDI_MODULE_NAME "qedi" |
26 | |
27 | struct qedi_endpoint; |
28 | |
#ifndef GET_FIELD2
/*
 * Extract a bit-field from @value using the HSI naming convention:
 * <name>_MASK selects the bits in place, <name>_OFFSET shifts them
 * down to bit 0.
 */
#define GET_FIELD2(value, name) \
	(((value) & (name ## _MASK)) >> (name ## _OFFSET))
#endif
33 | |
34 | /* |
35 | * PCI function probe defines |
36 | */ |
37 | #define QEDI_MODE_NORMAL 0 |
38 | #define QEDI_MODE_RECOVERY 1 |
39 | #define QEDI_MODE_SHUTDOWN 2 |
40 | |
41 | #define ISCSI_WQE_SET_PTU_INVALIDATE 1 |
42 | #define QEDI_MAX_ISCSI_TASK 4096 |
43 | #define QEDI_MAX_TASK_NUM 0x0FFF |
44 | #define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 |
45 | #define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */ |
46 | #define MAX_OUTSTANDING_TASKS_PER_CON 1024 |
47 | |
48 | #define QEDI_MAX_BD_LEN 0xffff |
49 | #define QEDI_BD_SPLIT_SZ 0x1000 |
50 | #define QEDI_PAGE_SIZE 4096 |
51 | #define QEDI_FAST_SGE_COUNT 4 |
52 | /* MAX Length for cached SGL */ |
53 | #define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1) |
54 | |
/*
 * MSI-X vector count: bounded by both the device's CQ count and the
 * number of online CPUs.
 */
#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
				num_online_cpus())

/* Local (source) TCP port range used for offloaded connections. */
#define QEDI_LOCAL_PORT_MIN 60000
#define QEDI_LOCAL_PORT_MAX 61024
#define QEDI_LOCAL_PORT_RANGE (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
#define QEDI_LOCAL_PORT_INVALID 0xffff
/* Light-L2 (uio) tx/rx ring sizes, in entries. */
#define TX_RX_RING 16
#define RX_RING (TX_RX_RING - 1)
#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE)
#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))

#define QEDI_HW_DMA_BOUNDARY 0xfff
/* Opaque cookie used as the iSCSI path handle - NOTE(review): confirm
 * against the transport ep_connect usage.
 */
#define QEDI_PATH_HANDLE 0xFE0000000UL
69 | |
/* NVM boot-target selector: primary or secondary target entry. */
enum qedi_nvm_tgts {
	QEDI_NVM_TGT_PRI,
	QEDI_NVM_TGT_SEC,
};
74 | |
/*
 * On-flash iSCSI NVM image layout: the config block followed by its CRC.
 * Layout must match what the device/firmware writes - do not reorder.
 */
struct qedi_nvm_iscsi_image {
	struct nvm_iscsi_cfg iscsi_cfg;
	u32 crc;
};
79 | |
/*
 * Control area shared with the userspace uio client. Fields are grouped
 * by which side writes them; the layout is part of the uio ABI (versioned
 * by uio_hsi_version), so fields must not be reordered or resized.
 */
struct qedi_uio_ctrl {
	/* meta data */
	u32 uio_hsi_version;

	/* user writes */
	u32 host_tx_prod;
	u32 host_rx_cons;
	u32 host_rx_bd_cons;
	u32 host_tx_pkt_len;
	u32 host_rx_cons_cnt;

	/* driver writes */
	u32 hw_tx_cons;
	u32 hw_rx_prod;
	u32 hw_rx_bd_prod;
	u32 hw_rx_prod_cnt;

	/* other */
	u8 mac_addr[6];
	u8 reserve[2];	/* pad to 4-byte alignment */
};
101 | |
/* Receive buffer descriptor exposed to the userspace uio client. */
struct qedi_rx_bd {
	u32 rx_pkt_index;
	u32 rx_pkt_len;
	u16 vlan_id;
};

/* Number of RX descriptors that fit in one page. */
#define QEDI_RX_DESC_CNT (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
#define QEDI_MAX_RX_DESC_CNT (QEDI_RX_DESC_CNT - 1)
#define QEDI_NUM_RX_BD (QEDI_RX_DESC_CNT * 1)
#define QEDI_MAX_RX_BD (QEDI_NUM_RX_BD - 1)

/*
 * Advance an RX index, stepping over one slot at the end of each
 * descriptor page. NOTE(review): the test uses '&' with a non-power-of-2
 * mask (QEDI_MAX_RX_DESC_CNT) - confirm the wrap logic against the ring
 * producer in the LL2 path.
 */
#define QEDI_NEXT_RX_IDX(x) ((((x) & (QEDI_MAX_RX_DESC_CNT)) == \
			(QEDI_MAX_RX_DESC_CNT - 1)) ? \
			(x) + 2 : (x) + 1)
116 | |
/*
 * Per-adapter uio device state: the uio registration plus the LL2 ring
 * and packet buffers mapped to userspace.
 */
struct qedi_uio_dev {
	struct uio_info qedi_uinfo;
	u32 uio_dev;		/* uio device number */
	struct list_head list;	/* global list of uio devices */

	u32 ll2_ring_size;
	void *ll2_ring;		/* LL2 ring memory (mapped to user) */

	u32 ll2_buf_size;
	void *ll2_buf;		/* LL2 packet buffer memory */

	void *rx_pkt;
	void *tx_pkt;

	struct qedi_ctx *qedi;	/* owning adapter context */
	struct pci_dev *pdev;
	void *uctrl;		/* struct qedi_uio_ctrl shared area */
};
135 | |
/* List to maintain the skb pointers */
struct skb_work_list {
	struct list_head list;
	struct sk_buff *skb;
	u16 vlan_id;	/* VLAN tag to restore when handing skb onward */
};
142 | |
/* Queue sizes in number of elements */
#define QEDI_SQ_SIZE MAX_OUTSTANDING_TASKS_PER_CON
#define QEDI_CQ_SIZE 2048
#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK
#define QEDI_PROTO_CQ_PROD_IDX 0

/* Physical (PBL) base addresses of the global queues handed to the
 * qed core per CPU queue.
 */
struct qedi_glbl_q_params {
	u64 hw_p_cq;	/* Completion queue PBL */
	u64 hw_p_rq;	/* Request queue PBL */
	u64 hw_p_cmdq;	/* Command queue PBL */
};
154 | |
/* Per-queue completion queue memory: CQE array, its DMA address and PBL. */
struct global_queue {
	union iscsi_cqe *cq;	/* CQE array (DMA coherent) */
	dma_addr_t cq_dma;
	u32 cq_mem_size;
	u32 cq_cons_idx; /* Completion queue consumer index */

	void *cq_pbl;		/* page base list for the CQ */
	dma_addr_t cq_pbl_dma;
	u32 cq_pbl_size;

};
166 | |
/* Per-interrupt-vector fastpath context (one per status block). */
struct qedi_fastpath {
	struct qed_sb_info *sb_info;	/* status block for this vector */
	u16 sb_id;
#define QEDI_NAME_SIZE 16
	char name[QEDI_NAME_SIZE];	/* irq name */
	struct qedi_ctx *qedi;		/* owning adapter */
};
174 | |
/* Used to pass fastpath information needed to process CQEs */
struct qedi_io_work {
	struct list_head list;
	struct iscsi_cqe_solicited cqe;	/* CQE payload (copied by value) */
	u16 que_idx;			/* queue the CQE arrived on */
};
181 | |
182 | /** |
183 | * struct iscsi_cid_queue - Per adapter iscsi cid queue |
184 | * |
185 | * @cid_que_base: queue base memory |
186 | * @cid_que: queue memory pointer |
187 | * @cid_q_prod_idx: produce index |
188 | * @cid_q_cons_idx: consumer index |
189 | * @cid_q_max_idx: max index. used to detect wrap around condition |
190 | * @cid_free_cnt: queue size |
191 | * @conn_cid_tbl: iscsi cid to conn structure mapping table |
192 | * |
193 | * Per adapter iSCSI CID Queue |
194 | */ |
195 | struct iscsi_cid_queue { |
196 | void *cid_que_base; |
197 | u32 *cid_que; |
198 | u32 cid_q_prod_idx; |
199 | u32 cid_q_cons_idx; |
200 | u32 cid_q_max_idx; |
201 | u32 cid_free_cnt; |
202 | struct qedi_conn **conn_cid_tbl; |
203 | }; |
204 | |
/*
 * Local TCP port-id allocator: a bitmap covering [start, start + max),
 * with @next as the rotor for the next search.
 */
struct qedi_portid_tbl {
	spinlock_t lock;	/* Port id lock */
	u16 start;
	u16 max;
	u16 next;
	unsigned long *table;	/* allocation bitmap */
};

/* Maps a wire ITT to the driver command tracking it. */
struct qedi_itt_map {
	__le32 itt;
	struct qedi_cmd *p_cmd;
};
217 | |
/* I/O tracing entry */
#define QEDI_IO_TRACE_SIZE 2048
/*
 * One slot of the adapter's circular I/O trace buffer; a slot is written
 * for a request (QEDI_IO_TRACE_REQ) or a response (QEDI_IO_TRACE_RSP).
 */
struct qedi_io_log {
#define QEDI_IO_TRACE_REQ 0
#define QEDI_IO_TRACE_RSP 1
	u8 direction;		/* REQ or RSP */
	u16 task_id;
	u32 cid;		/* connection id */
	u32 port_id; /* Remote port fabric ID */
	int lun;
	u8 op; /* SCSI CDB */
	u8 lba[4];
	unsigned int bufflen; /* SCSI buffer length */
	unsigned int sg_count; /* Number of SG elements */
	u8 fast_sgs; /* number of fast sgls */
	u8 slow_sgs; /* number of slow sgls */
	u8 cached_sgs; /* number of cached sgls */
	int result; /* Result passed back to mid-layer */
	unsigned long jiffies; /* Time stamp when I/O logged */
	int refcount; /* Reference count for task id */
	unsigned int blk_req_cpu; /* CPU that the task is queued on by
				   * blk layer
				   */
	unsigned int req_cpu; /* CPU that the task is queued on */
	unsigned int intr_cpu; /* Interrupt CPU that the task is received on */
	unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
				  * returned to blk layer
				  */
	bool cached_sge;
	bool slow_sge;
	bool fast_sge;
};
250 | |
/* Number of entries in BDQ */
#define QEDI_BDQ_NUM 256
#define QEDI_BDQ_BUF_SIZE 256

/* DMA coherent buffers for BDQ */
struct qedi_bdq_buf {
	void *buf_addr;		/* CPU virtual address */
	dma_addr_t buf_dma;	/* device-visible DMA address */
};
260 | |
/* Main port level struct */
/*
 * Per-adapter (PCI function) context. Owns the SCSI host, the qed core
 * device handle, all queue/BDQ memory, the uio (Light-L2) device, task-id
 * allocation state and the I/O trace buffer.
 */
struct qedi_ctx {
	struct qedi_dbg_ctx dbg_ctx;
	struct Scsi_Host *shost;
	struct pci_dev *pdev;
	struct qed_dev *cdev;		/* qed core device handle */
	struct qed_dev_iscsi_info dev_info;
	struct qed_int_info int_info;
	struct qedi_glbl_q_params *p_cpuq;	/* per-CPU queue params array */
	struct global_queue **global_queues;
	/* uio declaration */
	struct qedi_uio_dev *udev;
	struct list_head ll2_skb_list;	/* skbs pending for ll2_recv_thread */
	spinlock_t ll2_lock;	/* Light L2 lock */
	spinlock_t hba_lock;	/* per port lock */
	struct task_struct *ll2_recv_thread;
	/* error-state bits (bit numbers into qedi_err_flags) */
	unsigned long qedi_err_flags;
#define QEDI_ERR_ATTN_CLR_EN	0
#define QEDI_ERR_IS_RECOVERABLE	2
#define QEDI_ERR_OVERRIDE_EN	31
	/* driver-state bits (bit numbers into flags) */
	unsigned long flags;
#define UIO_DEV_OPENED		1
#define QEDI_IOTHREAD_WAKE	2
#define QEDI_IN_RECOVERY	5
#define QEDI_IN_OFFLINE		6
#define QEDI_IN_SHUTDOWN	7
#define QEDI_BLOCK_IO		8

	u8 mac[ETH_ALEN];
	u32 src_ip[4];		/* local IP; [0] only for IPv4 */
	u8 ip_type;

	/* Physical address of above array */
	dma_addr_t hw_p_cpuq;

	struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
	void *bdq_pbl;
	dma_addr_t bdq_pbl_dma;
	size_t bdq_pbl_mem_size;
	void *bdq_pbl_list;
	dma_addr_t bdq_pbl_list_dma;
	u8 bdq_pbl_list_num_entries;
	struct qedi_nvm_iscsi_image *iscsi_image;	/* NVM config + CRC */
	dma_addr_t nvm_buf_dma;
	void __iomem *bdq_primary_prod;
	void __iomem *bdq_secondary_prod;
	u16 bdq_prod_idx;
	u16 rq_num_entries;

	u32 max_sqes;
	u8 num_queues;
	u32 max_active_conns;
	s32 msix_count;

	struct iscsi_cid_queue cid_que;
	struct qedi_endpoint **ep_tbl;		/* indexed by iSCSI CID */
	struct qedi_portid_tbl lcl_port_tbl;	/* local TCP port allocator */

	/* Rx fast path intr context */
	struct qed_sb_info *sb_array;
	struct qedi_fastpath *fp_array;
	struct qed_iscsi_tid tasks;	/* task context memory layout */

#define QEDI_LINK_DOWN		0
#define QEDI_LINK_UP		1
	atomic_t link_state;

#define QEDI_RESERVE_TASK_ID	0
#define MAX_ISCSI_TASK_ENTRIES 4096
#define QEDI_INVALID_TASK_ID	(MAX_ISCSI_TASK_ENTRIES + 1)
	/* bitmap of in-use task ids, guarded by task_idx_lock */
	unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
	struct qedi_itt_map *itt_map;
	u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
	struct qed_pf_params pf_params;

	struct workqueue_struct *tmf_thread;		/* task-management work */
	struct workqueue_struct *offload_thread;	/* connection offload work */

	u16 ll2_mtu;

	struct workqueue_struct *dpc_wq;
	struct delayed_work recovery_work;
	struct delayed_work board_disable_work;

	spinlock_t task_idx_lock;	/* To protect gbl context */
	s32 last_tidx_alloc;
	s32 last_tidx_clear;

	/* circular I/O trace buffer, see struct qedi_io_log */
	struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
	spinlock_t io_trace_lock;	/* protect trace log buf */
	u16 io_trace_idx;
	unsigned int intr_cpu;
	/* SGE-mode usage counters/flags for statistics */
	u32 cached_sgls;
	bool use_cached_sge;
	u32 slow_sgls;
	bool use_slow_sge;
	u32 fast_sgls;
	bool use_fast_sge;

	atomic_t num_offloads;
#define SYSFS_FLAG_FW_SEL_BOOT 2
#define IPV6_LEN	41
#define IPV4_LEN	17
	struct iscsi_boot_kset *boot_kset;

	/* Used for iscsi statistics */
	struct mutex stats_lock;
};
369 | |
/* Deferred CQE work item handed to a per-CPU I/O thread. */
struct qedi_work {
	struct list_head list;
	struct qedi_ctx *qedi;
	union iscsi_cqe cqe;	/* CQE payload (copied by value) */
	u16 que_idx;		/* queue the CQE arrived on */
	bool is_solicited;
};

/* Per-CPU I/O thread and its pending work list. */
struct qedi_percpu_s {
	struct task_struct *iothread;
	struct list_head work_list;	/* list of struct qedi_work */
	spinlock_t p_work_lock;		/* Per cpu worker lock */
};
383 | |
384 | static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid) |
385 | { |
386 | return (info->blocks[tid / info->num_tids_per_block] + |
387 | (tid % info->num_tids_per_block) * info->size); |
388 | } |
389 | |
/* Split a 64-bit value into its high and low 32-bit halves. */
#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
392 | |
393 | #endif /* _QEDI_H_ */ |
394 | |