/*
 * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#ifndef __LIBCXGBI_H__
#define __LIBCXGBI_H__

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <scsi/scsi_device.h>
#include <scsi/libiscsi_tcp.h>

#include <libcxgb_ppm.h>

enum cxgbi_dbg_flag {
	CXGBI_DBG_ISCSI,
	CXGBI_DBG_DDP,
	CXGBI_DBG_TOE,
	CXGBI_DBG_SOCK,

	CXGBI_DBG_PDU_TX,
	CXGBI_DBG_PDU_RX,
	CXGBI_DBG_DEV,
};

#define log_debug(level, fmt, ...)	\
	do {	\
		if (dbg_level & (level)) \
			pr_info(fmt, ##__VA_ARGS__); \
	} while (0)

#define pr_info_ipaddr(fmt_trail,					\
			addr1, addr2, args_trail...)			\
do {									\
	if (!((1 << CXGBI_DBG_SOCK) & dbg_level))			\
		break;							\
	pr_info("%pISpc - %pISpc, " fmt_trail,				\
		addr1, addr2, args_trail);				\
} while (0)
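
/*
 * Illustrative use of pr_info_ipaddr() (hypothetical call site, not part
 * of this header): %pISpc takes struct sockaddr pointers and prints the
 * address together with the port:
 *
 *	pr_info_ipaddr("csk 0x%p.\n", (struct sockaddr *)&csk->saddr,
 *		       (struct sockaddr *)&csk->daddr, csk);
 */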

/* max. connections per adapter */
#define CXGBI_MAX_CONN		16384

/* always allocate room for AHS */
#define SKB_TX_ISCSI_PDU_HEADER_MAX	\
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)

#define ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8) */

/*
 * align pdu size to multiple of 512 for better performance
 */
#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0)
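
/*
 * Example: the macro rounds down to a 512-byte multiple, so n == 1400
 * becomes 1024 and n == 512 is left unchanged.
 */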

#define ULP2_MODE_ISCSI		2

#define ULP2_MAX_PKT_SIZE	16224
#define ULP2_MAX_PDU_PAYLOAD	\
	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)

#define CXGBI_ULP2_MAX_ISO_PAYLOAD	65535

#define CXGBI_MAX_ISO_DATA_IN_SKB	\
	min_t(u32, MAX_SKB_FRAGS << PAGE_SHIFT, CXGBI_ULP2_MAX_ISO_PAYLOAD)

#define cxgbi_is_iso_config(csk)	((csk)->cdev->skb_iso_txhdr)
#define cxgbi_is_iso_disabled(csk)	((csk)->disable_iso)

/*
 * For iSCSI connections the HW may insert digest bytes into the PDU. Those
 * digest bytes are not sent by the host but are part of the TCP payload and
 * therefore consume TCP sequence space.
 */
static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 };
static inline unsigned int cxgbi_ulp_extra_len(int submode)
{
	return ulp2_extra_len[submode & 3];
}
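
/*
 * Worked example (assuming one 4-byte CRC32C digest per enabled bit of
 * the 2-bit submode, hence the {0, 4, 4, 8} table above): with both
 * header and data digests on, a PDU occupies 8 extra bytes of TCP
 * sequence space:
 *
 *	tcp_len = pdu_len + cxgbi_ulp_extra_len(0x3);	// pdu_len + 8
 */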

#define CPL_RX_DDP_STATUS_DDP_SHIFT	16 /* ddp'able */
#define CPL_RX_DDP_STATUS_PAD_SHIFT	19 /* pad error */
#define CPL_RX_DDP_STATUS_HCRC_SHIFT	20 /* hcrc error */
#define CPL_RX_DDP_STATUS_DCRC_SHIFT	21 /* dcrc error */

/*
 * sge_opaque_hdr -
 * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
 * and for which we must reserve space.
 */
struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
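
/*
 * A minimal TX allocation sketch (hypothetical caller, not part of this
 * header): reserve the headroom advertised by the driver so the SGE can
 * place this opaque header at skb->head:
 *
 *	skb = alloc_skb(cdev->skb_tx_rsvd + len, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, cdev->skb_tx_rsvd);
 */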

struct cxgbi_sock {
	struct cxgbi_device *cdev;

	int tid;
	int atid;
	unsigned long flags;
	unsigned int mtu;
	unsigned short rss_qid;
	unsigned short txq_idx;
	unsigned short advmss;
	unsigned int tx_chan;
	unsigned int rx_chan;
	unsigned int mss_idx;
	unsigned int smac_idx;
	unsigned char port_id;
	int wr_max_cred;
	int wr_cred;
	int wr_una_cred;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;
#endif
	unsigned char hcrc_len;
	unsigned char dcrc_len;

	void *l2t;
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *cpl_close;
	struct sk_buff *cpl_abort_req;
	struct sk_buff *cpl_abort_rpl;
	struct sk_buff *skb_ulp_lhdr;
	spinlock_t lock;
	struct kref refcnt;
	unsigned int state;
	unsigned int csk_family;
	union {
		struct sockaddr_in saddr;
		struct sockaddr_in6 saddr6;
	};
	union {
		struct sockaddr_in daddr;
		struct sockaddr_in6 daddr6;
	};
	struct dst_entry *dst;
	struct sk_buff_head receive_queue;
	struct sk_buff_head write_queue;
	struct timer_list retry_timer;
	struct completion cmpl;
	int err;
	rwlock_t callback_lock;
	void *user_data;

	u32 rcv_nxt;
	u32 copied_seq;
	u32 rcv_wup;
	u32 snd_nxt;
	u32 snd_una;
	u32 write_seq;
	u32 snd_win;
	u32 rcv_win;

	bool disable_iso;
	u32 no_tx_credits;
	unsigned long prev_iso_ts;
};

/*
 * connection states
 */
enum cxgbi_sock_states {
	CTP_CLOSED,
	CTP_CONNECTING,
	CTP_ACTIVE_OPEN,
	CTP_ESTABLISHED,
	CTP_ACTIVE_CLOSE,
	CTP_PASSIVE_CLOSE,
	CTP_CLOSE_WAIT_1,
	CTP_CLOSE_WAIT_2,
	CTP_ABORTING,
};

/*
 * Connection flags -- mostly for tracking close-related events.
 */
enum cxgbi_sock_flags {
	CTPF_ABORT_RPL_RCVD,	/* received one ABORT_RPL_RSS message */
	CTPF_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */
	CTPF_ABORT_RPL_PENDING,	/* expecting an abort reply */
	CTPF_TX_DATA_SENT,	/* already sent a TX_DATA WR */
	CTPF_ACTIVE_CLOSE_NEEDED,	/* need to be closed */
	CTPF_HAS_ATID,		/* reserved atid */
	CTPF_HAS_TID,		/* reserved hw tid */
	CTPF_OFFLOAD_DOWN,	/* offload function off */
	CTPF_LOGOUT_RSP_RCVD,	/* received logout response */
};

struct cxgbi_skb_rx_cb {
	__u32 ddigest;
	__u32 pdulen;
};

struct cxgbi_skb_tx_cb {
	void *handle;
	void *arp_err_handler;
	struct sk_buff *wr_next;
	u16 iscsi_hdr_len;
	u8 ulp_mode;
};

enum cxgbi_skcb_flags {
	SKCBF_TX_NEED_HDR,	/* packet needs a header */
	SKCBF_TX_MEM_WRITE,	/* memory write */
	SKCBF_TX_FLAG_COMPL,	/* wr completion flag */
	SKCBF_RX_COALESCED,	/* received whole pdu */
	SKCBF_RX_HDR,		/* received pdu header */
	SKCBF_RX_DATA,		/* received pdu payload */
	SKCBF_RX_STATUS,	/* received ddp status */
	SKCBF_RX_ISCSI_COMPL,	/* received iscsi completion */
	SKCBF_RX_DATA_DDPD,	/* pdu payload ddp'd */
	SKCBF_RX_HCRC_ERR,	/* header digest error */
	SKCBF_RX_DCRC_ERR,	/* data digest error */
	SKCBF_RX_PAD_ERR,	/* padding byte error */
	SKCBF_TX_ISO,		/* iso cpl in tx skb */
};

struct cxgbi_skb_cb {
	union {
		struct cxgbi_skb_rx_cb rx;
		struct cxgbi_skb_tx_cb tx;
	};
	unsigned long flags;
	unsigned int seq;
};

#define CXGBI_SKB_CB(skb)	((struct cxgbi_skb_cb *)&((skb)->cb[0]))
#define cxgbi_skcb_flags(skb)		(CXGBI_SKB_CB(skb)->flags)
#define cxgbi_skcb_tcp_seq(skb)		(CXGBI_SKB_CB(skb)->seq)
#define cxgbi_skcb_rx_ddigest(skb)	(CXGBI_SKB_CB(skb)->rx.ddigest)
#define cxgbi_skcb_rx_pdulen(skb)	(CXGBI_SKB_CB(skb)->rx.pdulen)
#define cxgbi_skcb_tx_wr_next(skb)	(CXGBI_SKB_CB(skb)->tx.wr_next)
#define cxgbi_skcb_tx_iscsi_hdrlen(skb)	(CXGBI_SKB_CB(skb)->tx.iscsi_hdr_len)
#define cxgbi_skcb_tx_ulp_mode(skb)	(CXGBI_SKB_CB(skb)->tx.ulp_mode)

static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
				       enum cxgbi_skcb_flags flag)
{
	__set_bit(flag, &(cxgbi_skcb_flags(skb)));
}

static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
					 enum cxgbi_skcb_flags flag)
{
	__clear_bit(flag, &(cxgbi_skcb_flags(skb)));
}

static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
				       enum cxgbi_skcb_flags flag)
{
	return test_bit(flag, &(cxgbi_skcb_flags(skb)));
}

static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
				       enum cxgbi_sock_flags flag)
{
	__set_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, bit %d.\n",
		  csk, csk->state, csk->flags, flag);
}

static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
					 enum cxgbi_sock_flags flag)
{
	__clear_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, bit %d.\n",
		  csk, csk->state, csk->flags, flag);
}

static inline int cxgbi_sock_flag(struct cxgbi_sock *csk,
				  enum cxgbi_sock_flags flag)
{
	if (csk == NULL)
		return 0;
	return test_bit(flag, &csk->flags);
}

static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, state -> %u.\n",
		  csk, csk->state, csk->flags, state);
	csk->state = state;
}

static inline void cxgbi_sock_free(struct kref *kref)
{
	struct cxgbi_sock *csk = container_of(kref,
					      struct cxgbi_sock,
					      refcnt);
	if (csk) {
		log_debug(1 << CXGBI_DBG_SOCK,
			  "free csk 0x%p, state %u, flags 0x%lx\n",
			  csk, csk->state, csk->flags);
		kfree(csk);
	}
}

static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		  "%s, put csk 0x%p, ref %u-1.\n",
		  fn, csk, kref_read(&csk->refcnt));
	kref_put(&csk->refcnt, cxgbi_sock_free);
}
#define cxgbi_sock_put(csk)	__cxgbi_sock_put(__func__, csk)

static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		  "%s, get csk 0x%p, ref %u+1.\n",
		  fn, csk, kref_read(&csk->refcnt));
	kref_get(&csk->refcnt);
}
#define cxgbi_sock_get(csk)	__cxgbi_sock_get(__func__, csk)

static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk)
{
	return csk->state >= CTP_ACTIVE_CLOSE;
}

static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk)
{
	return csk->state == CTP_ESTABLISHED;
}

static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->write_queue)))
		__kfree_skb(skb);
}

static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win)
{
	unsigned int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}
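
/*
 * Example: a 256KB receive window needs a window scale of 3, since
 * 65535 << 2 == 262140 is still smaller than 262144 while
 * 65535 << 3 == 524280 covers it.
 */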

static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);

	if (skb) {
		__skb_put(skb, wrlen);
		memset(skb->head, 0, wrlen + dlen);
	} else
		pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen);
	return skb;
}

/*
 * The number of WRs needed for an skb depends on the number of fragments
 * in the skb and whether it has any payload in its main body. This maps the
 * length of the gather list represented by an skb into the # of necessary WRs.
 * The extra two fragments are for iscsi bhs and payload padding.
 */
#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)

static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk)
{
	csk->wr_pending_head = csk->wr_pending_tail = NULL;
}

static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
					 struct sk_buff *skb)
{
	cxgbi_skcb_tx_wr_next(skb) = NULL;
	/*
	 * Take an extra reference since both this layer and the driver
	 * need to release the packet before it is really freed.
	 */
	skb_get(skb);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}
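
/*
 * Illustrative pairing with cxgbi_sock_dequeue_wr() (hypothetical caller):
 *
 *	cxgbi_sock_enqueue_wr(csk, skb);	// before posting the WR
 *	...
 *	skb = cxgbi_sock_dequeue_wr(csk);	// once the WR is acked
 *	kfree_skb(skb);				// drop the extra reference
 */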

static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk)
{
	int n = 0;
	const struct sk_buff *skb = csk->wr_pending_head;

	while (skb) {
		n += skb->csum;	/* num of work requests */
		skb = cxgbi_skcb_tx_wr_next(skb);
	}
	return n;
}

static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk)
{
	return csk->wr_pending_head;
}

static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->wr_pending_head;

	if (likely(skb)) {
		csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb);
		cxgbi_skcb_tx_wr_next(skb) = NULL;
	}
	return skb;
}

void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *);
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *);
void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *);
void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int);
void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *);
void cxgbi_sock_closed(struct cxgbi_sock *);
void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int);
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *);
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *);
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32);
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int,
			   int);
unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int);
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *);

struct cxgbi_hba {
	struct net_device *ndev;
	struct net_device *vdev;	/* vlan dev */
	struct Scsi_Host *shost;
	struct cxgbi_device *cdev;
	__be32 ipv4addr;
	unsigned char port_id;
};

struct cxgbi_ports_map {
	unsigned int max_connect;
	unsigned int used;
	unsigned short sport_base;
	spinlock_t lock;
	unsigned int next;
	struct cxgbi_sock **port_csk;
};

#define CXGBI_FLAG_DEV_T3		0x1
#define CXGBI_FLAG_DEV_T4		0x2
#define CXGBI_FLAG_ADAPTER_RESET	0x4
#define CXGBI_FLAG_IPV4_SET		0x10
#define CXGBI_FLAG_USE_PPOD_OFLDQ	0x40
#define CXGBI_FLAG_DDP_OFF		0x100
#define CXGBI_FLAG_DEV_ISO_OFF		0x400

struct cxgbi_device {
	struct list_head list_head;
	struct list_head rcu_node;
	unsigned int flags;
	struct net_device **ports;
	void *lldev;
	struct cxgbi_hba **hbas;
	const unsigned short *mtus;
	unsigned char nmtus;
	unsigned char nports;
	struct pci_dev *pdev;
	struct dentry *debugfs_root;
	struct iscsi_transport *itp;
	struct module *owner;

	unsigned int pfvf;
	unsigned int rx_credit_thres;
	unsigned int skb_tx_rsvd;
	u32 skb_iso_txhdr;
	unsigned int skb_rx_extra;	/* for msg coalesced mode */
	unsigned int tx_max_size;
	unsigned int rx_max_size;
	unsigned int rxq_idx_cntr;
	struct cxgbi_ports_map pmap;

	void (*dev_ddp_cleanup)(struct cxgbi_device *);
	struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *);
	int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *,
			       struct cxgbi_task_tag_info *);
	void (*csk_ddp_clear_map)(struct cxgbi_device *cdev,
				  struct cxgbi_ppm *,
				  struct cxgbi_task_tag_info *);
	int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
				    unsigned int, int, int);
	int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
				   unsigned int, int);

	void (*csk_release_offload_resources)(struct cxgbi_sock *);
	int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
	u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
	int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
	void (*csk_send_abort_req)(struct cxgbi_sock *);
	void (*csk_send_close_req)(struct cxgbi_sock *);
	int (*csk_alloc_cpls)(struct cxgbi_sock *);
	int (*csk_init_act_open)(struct cxgbi_sock *);

	void *dd_data;
};
#define cxgbi_cdev_priv(cdev)	((cdev)->dd_data)

struct cxgbi_conn {
	struct cxgbi_endpoint *cep;
	struct iscsi_conn *iconn;
	struct cxgbi_hba *chba;
	u32 task_idx_bits;
	unsigned int ddp_full;
	unsigned int ddp_tag_full;
};

struct cxgbi_endpoint {
	struct cxgbi_conn *cconn;
	struct cxgbi_hba *chba;
	struct cxgbi_sock *csk;
};

struct cxgbi_task_data {
#define CXGBI_TASK_SGL_CHECKED	0x1
#define CXGBI_TASK_SGL_COPY	0x2
	u8 flags;
	unsigned short nr_frags;
	struct page_frag frags[MAX_SKB_FRAGS];
	struct sk_buff *skb;
	unsigned int dlen;
	unsigned int offset;
	unsigned int count;
	unsigned int sgoffset;
	u32 total_count;
	u32 total_offset;
	u32 max_xmit_dlength;
	struct cxgbi_task_tag_info ttinfo;
};
#define iscsi_task_cxgbi_data(task) \
	((task)->dd_data + sizeof(struct iscsi_tcp_task))
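
/*
 * Layout implied by the macro above: libiscsi_tcp owns the start of
 * task->dd_data and the cxgbi per-task data sits immediately after it:
 *
 *	task->dd_data: [ struct iscsi_tcp_task ][ struct cxgbi_task_data ]
 */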

struct cxgbi_iso_info {
#define CXGBI_ISO_INFO_FSLICE		0x1
#define CXGBI_ISO_INFO_LSLICE		0x2
#define CXGBI_ISO_INFO_IMM_ENABLE	0x4
	u8 flags;
	u8 op;
	u8 ahs;
	u8 num_pdu;
	u32 mpdu;
	u32 burst_size;
	u32 len;
	u32 segment_offset;
	u32 datasn_offset;
	u32 buffer_offset;
};

static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
{
	if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
		chba->ipv4addr = ipaddr;
	else
		pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n",
			chba->ndev->name);
}

struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
void cxgbi_device_unregister(struct cxgbi_device *);
void cxgbi_device_unregister_all(unsigned int flag);
struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
						     int *);
int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
		   const struct scsi_host_template *,
		   struct scsi_transport_template *);
void cxgbi_hbas_remove(struct cxgbi_device *);

int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn);
void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev);

void cxgbi_conn_tx_open(struct cxgbi_sock *);
void cxgbi_conn_pdu_ready(struct cxgbi_sock *);
int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8);
int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
int cxgbi_conn_xmit_pdu(struct iscsi_task *);

void cxgbi_cleanup_task(struct iscsi_task *task);

umode_t cxgbi_attr_is_visible(int param_type, int param);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
			 enum iscsi_param, char *, int);
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
int cxgbi_bind_conn(struct iscsi_cls_session *,
		    struct iscsi_cls_conn *, u64, int);
void cxgbi_destroy_session(struct iscsi_cls_session *);
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *,
					       u16, u16, u32);
int cxgbi_set_host_param(struct Scsi_Host *,
			 enum iscsi_host_param, char *, int);
int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *);
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *,
					struct sockaddr *, int);
int cxgbi_ep_poll(struct iscsi_endpoint *, int);
void cxgbi_ep_disconnect(struct iscsi_endpoint *);

int cxgbi_iscsi_init(struct iscsi_transport *,
		     struct scsi_transport_template **);
void cxgbi_iscsi_cleanup(struct iscsi_transport *,
			 struct scsi_transport_template **);
void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *);
int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int,
		   unsigned int, unsigned int);
int cxgbi_ddp_cleanup(struct cxgbi_device *);
void cxgbi_ddp_page_size_factor(int *);
void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *,
			    struct cxgbi_task_tag_info *,
			    struct scatterlist **sg_pp, unsigned int *sg_off);
int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
			struct cxgbi_tag_format *tformat,
			unsigned int iscsi_size, unsigned int llimit,
			unsigned int start, unsigned int rsvd_factor,
			unsigned int edram_start, unsigned int edram_size);
#endif	/* __LIBCXGBI_H__ */