1 | /* |
2 | * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. |
3 | * |
4 | * Copyright (c) 2010-2015 Chelsio Communications, Inc. |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation. |
9 | * |
10 | * Written by: Karen Xie (kxie@chelsio.com) |
11 | * Written by: Rakesh Ranjan (rranjan@chelsio.com) |
12 | */ |
13 | |
14 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ |
15 | |
16 | #include <linux/skbuff.h> |
17 | #include <linux/crypto.h> |
18 | #include <linux/scatterlist.h> |
19 | #include <linux/pci.h> |
20 | #include <scsi/scsi.h> |
21 | #include <scsi/scsi_cmnd.h> |
22 | #include <scsi/scsi_host.h> |
23 | #include <linux/if_vlan.h> |
24 | #include <linux/inet.h> |
25 | #include <net/dst.h> |
26 | #include <net/route.h> |
27 | #include <net/ipv6.h> |
28 | #include <net/ip6_route.h> |
29 | #include <net/addrconf.h> |
30 | |
31 | #include <linux/inetdevice.h> /* ip_dev_find */ |
32 | #include <linux/module.h> |
33 | #include <net/tcp.h> |
34 | |
35 | static unsigned int dbg_level; |
36 | |
37 | #include "libcxgbi.h" |
38 | |
39 | #define DRV_MODULE_NAME "libcxgbi" |
40 | #define DRV_MODULE_DESC "Chelsio iSCSI driver library" |
41 | #define DRV_MODULE_VERSION "0.9.1-ko" |
42 | #define DRV_MODULE_RELDATE "Apr. 2015" |
43 | |
44 | static char version[] = |
45 | DRV_MODULE_DESC " " DRV_MODULE_NAME |
46 | " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n" ; |
47 | |
48 | MODULE_AUTHOR("Chelsio Communications, Inc." ); |
49 | MODULE_DESCRIPTION(DRV_MODULE_DESC); |
50 | MODULE_VERSION(DRV_MODULE_VERSION); |
51 | MODULE_LICENSE("GPL" ); |
52 | |
53 | module_param(dbg_level, uint, 0644); |
54 | MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)" ); |
55 | |
56 | |
57 | /* |
58 | * cxgbi device management |
59 | * maintains a list of the cxgbi devices |
60 | */ |
61 | static LIST_HEAD(cdev_list); |
62 | static DEFINE_MUTEX(cdev_mutex); |
63 | |
64 | static LIST_HEAD(cdev_rcu_list); |
65 | static DEFINE_SPINLOCK(cdev_rcu_lock); |
66 | |
67 | static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age) |
68 | { |
69 | if (age) |
70 | *age = sw_tag & 0x7FFF; |
71 | if (idx) |
72 | *idx = (sw_tag >> 16) & 0x7FFF; |
73 | } |
74 | |
75 | int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, |
76 | unsigned int max_conn) |
77 | { |
78 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
79 | |
80 | pmap->port_csk = kvzalloc(array_size(max_conn, |
81 | sizeof(struct cxgbi_sock *)), |
82 | GFP_KERNEL | __GFP_NOWARN); |
83 | if (!pmap->port_csk) { |
84 | pr_warn("cdev 0x%p, portmap OOM %u.\n" , cdev, max_conn); |
85 | return -ENOMEM; |
86 | } |
87 | |
88 | pmap->max_connect = max_conn; |
89 | pmap->sport_base = base; |
90 | spin_lock_init(&pmap->lock); |
91 | return 0; |
92 | } |
93 | EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); |
94 | |
95 | void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) |
96 | { |
97 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
98 | struct cxgbi_sock *csk; |
99 | int i; |
100 | |
101 | for (i = 0; i < pmap->max_connect; i++) { |
102 | if (pmap->port_csk[i]) { |
103 | csk = pmap->port_csk[i]; |
104 | pmap->port_csk[i] = NULL; |
105 | log_debug(1 << CXGBI_DBG_SOCK, |
106 | "csk 0x%p, cdev 0x%p, offload down.\n" , |
107 | csk, cdev); |
108 | spin_lock_bh(lock: &csk->lock); |
109 | cxgbi_sock_set_flag(csk, flag: CTPF_OFFLOAD_DOWN); |
110 | cxgbi_sock_closed(csk); |
111 | spin_unlock_bh(lock: &csk->lock); |
112 | cxgbi_sock_put(csk); |
113 | } |
114 | } |
115 | } |
116 | EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); |
117 | |
118 | static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) |
119 | { |
120 | log_debug(1 << CXGBI_DBG_DEV, |
121 | "cdev 0x%p, p# %u.\n" , cdev, cdev->nports); |
122 | cxgbi_hbas_remove(cdev); |
123 | cxgbi_device_portmap_cleanup(cdev); |
124 | if (cdev->cdev2ppm) |
125 | cxgbi_ppm_release(cdev->cdev2ppm(cdev)); |
126 | if (cdev->pmap.max_connect) |
127 | kvfree(addr: cdev->pmap.port_csk); |
128 | kfree(objp: cdev); |
129 | } |
130 | |
131 | struct cxgbi_device *cxgbi_device_register(unsigned int , |
132 | unsigned int nports) |
133 | { |
134 | struct cxgbi_device *cdev; |
135 | |
136 | cdev = kzalloc(size: sizeof(*cdev) + extra + nports * |
137 | (sizeof(struct cxgbi_hba *) + |
138 | sizeof(struct net_device *)), |
139 | GFP_KERNEL); |
140 | if (!cdev) { |
141 | pr_warn("nport %d, OOM.\n" , nports); |
142 | return NULL; |
143 | } |
144 | cdev->ports = (struct net_device **)(cdev + 1); |
145 | cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * |
146 | sizeof(struct net_device *)); |
147 | if (extra) |
148 | cdev->dd_data = ((char *)cdev->hbas) + |
149 | nports * sizeof(struct cxgbi_hba *); |
150 | spin_lock_init(&cdev->pmap.lock); |
151 | |
152 | mutex_lock(&cdev_mutex); |
153 | list_add_tail(new: &cdev->list_head, head: &cdev_list); |
154 | mutex_unlock(lock: &cdev_mutex); |
155 | |
156 | spin_lock(lock: &cdev_rcu_lock); |
157 | list_add_tail_rcu(new: &cdev->rcu_node, head: &cdev_rcu_list); |
158 | spin_unlock(lock: &cdev_rcu_lock); |
159 | |
160 | log_debug(1 << CXGBI_DBG_DEV, |
161 | "cdev 0x%p, p# %u.\n" , cdev, nports); |
162 | return cdev; |
163 | } |
164 | EXPORT_SYMBOL_GPL(cxgbi_device_register); |
165 | |
166 | void cxgbi_device_unregister(struct cxgbi_device *cdev) |
167 | { |
168 | log_debug(1 << CXGBI_DBG_DEV, |
169 | "cdev 0x%p, p# %u,%s.\n" , |
170 | cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "" ); |
171 | |
172 | mutex_lock(&cdev_mutex); |
173 | list_del(entry: &cdev->list_head); |
174 | mutex_unlock(lock: &cdev_mutex); |
175 | |
176 | spin_lock(lock: &cdev_rcu_lock); |
177 | list_del_rcu(entry: &cdev->rcu_node); |
178 | spin_unlock(lock: &cdev_rcu_lock); |
179 | synchronize_rcu(); |
180 | |
181 | cxgbi_device_destroy(cdev); |
182 | } |
183 | EXPORT_SYMBOL_GPL(cxgbi_device_unregister); |
184 | |
185 | void cxgbi_device_unregister_all(unsigned int flag) |
186 | { |
187 | struct cxgbi_device *cdev, *tmp; |
188 | |
189 | mutex_lock(&cdev_mutex); |
190 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { |
191 | if ((cdev->flags & flag) == flag) { |
192 | mutex_unlock(lock: &cdev_mutex); |
193 | cxgbi_device_unregister(cdev); |
194 | mutex_lock(&cdev_mutex); |
195 | } |
196 | } |
197 | mutex_unlock(lock: &cdev_mutex); |
198 | } |
199 | EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); |
200 | |
201 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) |
202 | { |
203 | struct cxgbi_device *cdev, *tmp; |
204 | |
205 | mutex_lock(&cdev_mutex); |
206 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { |
207 | if (cdev->lldev == lldev) { |
208 | mutex_unlock(lock: &cdev_mutex); |
209 | return cdev; |
210 | } |
211 | } |
212 | mutex_unlock(lock: &cdev_mutex); |
213 | |
214 | log_debug(1 << CXGBI_DBG_DEV, |
215 | "lldev 0x%p, NO match found.\n" , lldev); |
216 | return NULL; |
217 | } |
218 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); |
219 | |
220 | struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, |
221 | int *port) |
222 | { |
223 | struct net_device *vdev = NULL; |
224 | struct cxgbi_device *cdev, *tmp; |
225 | int i; |
226 | |
227 | if (is_vlan_dev(dev: ndev)) { |
228 | vdev = ndev; |
229 | ndev = vlan_dev_real_dev(dev: ndev); |
230 | log_debug(1 << CXGBI_DBG_DEV, |
231 | "vlan dev %s -> %s.\n" , vdev->name, ndev->name); |
232 | } |
233 | |
234 | mutex_lock(&cdev_mutex); |
235 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { |
236 | for (i = 0; i < cdev->nports; i++) { |
237 | if (ndev == cdev->ports[i]) { |
238 | cdev->hbas[i]->vdev = vdev; |
239 | mutex_unlock(lock: &cdev_mutex); |
240 | if (port) |
241 | *port = i; |
242 | return cdev; |
243 | } |
244 | } |
245 | } |
246 | mutex_unlock(lock: &cdev_mutex); |
247 | log_debug(1 << CXGBI_DBG_DEV, |
248 | "ndev 0x%p, %s, NO match found.\n" , ndev, ndev->name); |
249 | return NULL; |
250 | } |
251 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); |
252 | |
253 | struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev, |
254 | int *port) |
255 | { |
256 | struct net_device *vdev = NULL; |
257 | struct cxgbi_device *cdev; |
258 | int i; |
259 | |
260 | if (is_vlan_dev(dev: ndev)) { |
261 | vdev = ndev; |
262 | ndev = vlan_dev_real_dev(dev: ndev); |
263 | pr_info("vlan dev %s -> %s.\n" , vdev->name, ndev->name); |
264 | } |
265 | |
266 | rcu_read_lock(); |
267 | list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) { |
268 | for (i = 0; i < cdev->nports; i++) { |
269 | if (ndev == cdev->ports[i]) { |
270 | cdev->hbas[i]->vdev = vdev; |
271 | rcu_read_unlock(); |
272 | if (port) |
273 | *port = i; |
274 | return cdev; |
275 | } |
276 | } |
277 | } |
278 | rcu_read_unlock(); |
279 | |
280 | log_debug(1 << CXGBI_DBG_DEV, |
281 | "ndev 0x%p, %s, NO match found.\n" , ndev, ndev->name); |
282 | return NULL; |
283 | } |
284 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu); |
285 | |
286 | static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, |
287 | int *port) |
288 | { |
289 | struct net_device *vdev = NULL; |
290 | struct cxgbi_device *cdev, *tmp; |
291 | int i; |
292 | |
293 | if (is_vlan_dev(dev: ndev)) { |
294 | vdev = ndev; |
295 | ndev = vlan_dev_real_dev(dev: ndev); |
296 | pr_info("vlan dev %s -> %s.\n" , vdev->name, ndev->name); |
297 | } |
298 | |
299 | mutex_lock(&cdev_mutex); |
300 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { |
301 | for (i = 0; i < cdev->nports; i++) { |
302 | if (!memcmp(p: ndev->dev_addr, q: cdev->ports[i]->dev_addr, |
303 | MAX_ADDR_LEN)) { |
304 | cdev->hbas[i]->vdev = vdev; |
305 | mutex_unlock(lock: &cdev_mutex); |
306 | if (port) |
307 | *port = i; |
308 | return cdev; |
309 | } |
310 | } |
311 | } |
312 | mutex_unlock(lock: &cdev_mutex); |
313 | log_debug(1 << CXGBI_DBG_DEV, |
314 | "ndev 0x%p, %s, NO match mac found.\n" , |
315 | ndev, ndev->name); |
316 | return NULL; |
317 | } |
318 | |
319 | void cxgbi_hbas_remove(struct cxgbi_device *cdev) |
320 | { |
321 | int i; |
322 | struct cxgbi_hba *chba; |
323 | |
324 | log_debug(1 << CXGBI_DBG_DEV, |
325 | "cdev 0x%p, p#%u.\n" , cdev, cdev->nports); |
326 | |
327 | for (i = 0; i < cdev->nports; i++) { |
328 | chba = cdev->hbas[i]; |
329 | if (chba) { |
330 | cdev->hbas[i] = NULL; |
331 | iscsi_host_remove(shost: chba->shost, is_shutdown: false); |
332 | pci_dev_put(dev: cdev->pdev); |
333 | iscsi_host_free(shost: chba->shost); |
334 | } |
335 | } |
336 | } |
337 | EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); |
338 | |
339 | int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun, |
340 | unsigned int max_conns, const struct scsi_host_template *sht, |
341 | struct scsi_transport_template *stt) |
342 | { |
343 | struct cxgbi_hba *chba; |
344 | struct Scsi_Host *shost; |
345 | int i, err; |
346 | |
347 | log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n" , cdev, cdev->nports); |
348 | |
349 | for (i = 0; i < cdev->nports; i++) { |
350 | shost = iscsi_host_alloc(sht, dd_data_size: sizeof(*chba), xmit_can_sleep: 1); |
351 | if (!shost) { |
352 | pr_info("0x%p, p%d, %s, host alloc failed.\n" , |
353 | cdev, i, cdev->ports[i]->name); |
354 | err = -ENOMEM; |
355 | goto err_out; |
356 | } |
357 | |
358 | shost->transportt = stt; |
359 | shost->max_lun = max_lun; |
360 | shost->max_id = max_conns - 1; |
361 | shost->max_channel = 0; |
362 | shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; |
363 | |
364 | chba = iscsi_host_priv(shost); |
365 | chba->cdev = cdev; |
366 | chba->ndev = cdev->ports[i]; |
367 | chba->shost = shost; |
368 | |
369 | shost->can_queue = sht->can_queue - ISCSI_MGMT_CMDS_MAX; |
370 | |
371 | log_debug(1 << CXGBI_DBG_DEV, |
372 | "cdev 0x%p, p#%d %s: chba 0x%p.\n" , |
373 | cdev, i, cdev->ports[i]->name, chba); |
374 | |
375 | pci_dev_get(dev: cdev->pdev); |
376 | err = iscsi_host_add(shost, pdev: &cdev->pdev->dev); |
377 | if (err) { |
378 | pr_info("cdev 0x%p, p#%d %s, host add failed.\n" , |
379 | cdev, i, cdev->ports[i]->name); |
380 | pci_dev_put(dev: cdev->pdev); |
381 | scsi_host_put(t: shost); |
382 | goto err_out; |
383 | } |
384 | |
385 | cdev->hbas[i] = chba; |
386 | } |
387 | |
388 | return 0; |
389 | |
390 | err_out: |
391 | cxgbi_hbas_remove(cdev); |
392 | return err; |
393 | } |
394 | EXPORT_SYMBOL_GPL(cxgbi_hbas_add); |
395 | |
396 | /* |
397 | * iSCSI offload |
398 | * |
399 | * - source port management |
400 | * To find a free source port in the port allocation map we use a very simple |
401 | * rotor scheme to look for the next free port. |
402 | * |
403 | * If a source port has been specified make sure that it doesn't collide with |
404 | * our normal source port allocation map. If it's outside the range of our |
405 | * allocation/deallocation scheme just let them use it. |
406 | * |
407 | * If the source port is outside our allocation range, the caller is |
408 | * responsible for keeping track of their port usage. |
409 | */ |
410 | |
411 | static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev, |
412 | unsigned char port_id) |
413 | { |
414 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
415 | unsigned int i; |
416 | unsigned int used; |
417 | |
418 | if (!pmap->max_connect || !pmap->used) |
419 | return NULL; |
420 | |
421 | spin_lock_bh(lock: &pmap->lock); |
422 | used = pmap->used; |
423 | for (i = 0; used && i < pmap->max_connect; i++) { |
424 | struct cxgbi_sock *csk = pmap->port_csk[i]; |
425 | |
426 | if (csk) { |
427 | if (csk->port_id == port_id) { |
428 | spin_unlock_bh(lock: &pmap->lock); |
429 | return csk; |
430 | } |
431 | used--; |
432 | } |
433 | } |
434 | spin_unlock_bh(lock: &pmap->lock); |
435 | |
436 | return NULL; |
437 | } |
438 | |
439 | static int sock_get_port(struct cxgbi_sock *csk) |
440 | { |
441 | struct cxgbi_device *cdev = csk->cdev; |
442 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
443 | unsigned int start; |
444 | int idx; |
445 | __be16 *port; |
446 | |
447 | if (!pmap->max_connect) { |
448 | pr_err("cdev 0x%p, p#%u %s, NO port map.\n" , |
449 | cdev, csk->port_id, cdev->ports[csk->port_id]->name); |
450 | return -EADDRNOTAVAIL; |
451 | } |
452 | |
453 | if (csk->csk_family == AF_INET) |
454 | port = &csk->saddr.sin_port; |
455 | else /* ipv6 */ |
456 | port = &csk->saddr6.sin6_port; |
457 | |
458 | if (*port) { |
459 | pr_err("source port NON-ZERO %u.\n" , |
460 | ntohs(*port)); |
461 | return -EADDRINUSE; |
462 | } |
463 | |
464 | spin_lock_bh(lock: &pmap->lock); |
465 | if (pmap->used >= pmap->max_connect) { |
466 | spin_unlock_bh(lock: &pmap->lock); |
467 | pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n" , |
468 | cdev, csk->port_id, cdev->ports[csk->port_id]->name); |
469 | return -EADDRNOTAVAIL; |
470 | } |
471 | |
472 | start = idx = pmap->next; |
473 | do { |
474 | if (++idx >= pmap->max_connect) |
475 | idx = 0; |
476 | if (!pmap->port_csk[idx]) { |
477 | pmap->used++; |
478 | *port = htons(pmap->sport_base + idx); |
479 | pmap->next = idx; |
480 | pmap->port_csk[idx] = csk; |
481 | spin_unlock_bh(lock: &pmap->lock); |
482 | cxgbi_sock_get(csk); |
483 | log_debug(1 << CXGBI_DBG_SOCK, |
484 | "cdev 0x%p, p#%u %s, p %u, %u.\n" , |
485 | cdev, csk->port_id, |
486 | cdev->ports[csk->port_id]->name, |
487 | pmap->sport_base + idx, pmap->next); |
488 | return 0; |
489 | } |
490 | } while (idx != start); |
491 | spin_unlock_bh(lock: &pmap->lock); |
492 | |
493 | /* should not happen */ |
494 | pr_warn("cdev 0x%p, p#%u %s, next %u?\n" , |
495 | cdev, csk->port_id, cdev->ports[csk->port_id]->name, |
496 | pmap->next); |
497 | return -EADDRNOTAVAIL; |
498 | } |
499 | |
500 | static void sock_put_port(struct cxgbi_sock *csk) |
501 | { |
502 | struct cxgbi_device *cdev = csk->cdev; |
503 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
504 | __be16 *port; |
505 | |
506 | if (csk->csk_family == AF_INET) |
507 | port = &csk->saddr.sin_port; |
508 | else /* ipv6 */ |
509 | port = &csk->saddr6.sin6_port; |
510 | |
511 | if (*port) { |
512 | int idx = ntohs(*port) - pmap->sport_base; |
513 | |
514 | *port = 0; |
515 | if (idx < 0 || idx >= pmap->max_connect) { |
516 | pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n" , |
517 | cdev, csk->port_id, |
518 | cdev->ports[csk->port_id]->name, |
519 | ntohs(*port)); |
520 | return; |
521 | } |
522 | |
523 | spin_lock_bh(lock: &pmap->lock); |
524 | pmap->port_csk[idx] = NULL; |
525 | pmap->used--; |
526 | spin_unlock_bh(lock: &pmap->lock); |
527 | |
528 | log_debug(1 << CXGBI_DBG_SOCK, |
529 | "cdev 0x%p, p#%u %s, release %u.\n" , |
530 | cdev, csk->port_id, cdev->ports[csk->port_id]->name, |
531 | pmap->sport_base + idx); |
532 | |
533 | cxgbi_sock_put(csk); |
534 | } |
535 | } |
536 | |
537 | /* |
538 | * iscsi tcp connection |
539 | */ |
540 | void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) |
541 | { |
542 | if (csk->cpl_close) { |
543 | kfree_skb(skb: csk->cpl_close); |
544 | csk->cpl_close = NULL; |
545 | } |
546 | if (csk->cpl_abort_req) { |
547 | kfree_skb(skb: csk->cpl_abort_req); |
548 | csk->cpl_abort_req = NULL; |
549 | } |
550 | if (csk->cpl_abort_rpl) { |
551 | kfree_skb(skb: csk->cpl_abort_rpl); |
552 | csk->cpl_abort_rpl = NULL; |
553 | } |
554 | } |
555 | EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); |
556 | |
557 | static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) |
558 | { |
559 | struct cxgbi_sock *csk = kzalloc(size: sizeof(*csk), GFP_NOIO); |
560 | |
561 | if (!csk) { |
562 | pr_info("alloc csk %zu failed.\n" , sizeof(*csk)); |
563 | return NULL; |
564 | } |
565 | |
566 | if (cdev->csk_alloc_cpls(csk) < 0) { |
567 | pr_info("csk 0x%p, alloc cpls failed.\n" , csk); |
568 | kfree(objp: csk); |
569 | return NULL; |
570 | } |
571 | |
572 | spin_lock_init(&csk->lock); |
573 | kref_init(kref: &csk->refcnt); |
574 | skb_queue_head_init(list: &csk->receive_queue); |
575 | skb_queue_head_init(list: &csk->write_queue); |
576 | timer_setup(&csk->retry_timer, NULL, 0); |
577 | init_completion(x: &csk->cmpl); |
578 | rwlock_init(&csk->callback_lock); |
579 | csk->cdev = cdev; |
580 | csk->flags = 0; |
581 | cxgbi_sock_set_state(csk, state: CTP_CLOSED); |
582 | |
583 | log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n" , cdev, csk); |
584 | |
585 | return csk; |
586 | } |
587 | |
588 | static struct rtable *find_route_ipv4(struct flowi4 *fl4, |
589 | __be32 saddr, __be32 daddr, |
590 | __be16 sport, __be16 dport, u8 tos, |
591 | int ifindex) |
592 | { |
593 | struct rtable *rt; |
594 | |
595 | rt = ip_route_output_ports(net: &init_net, fl4, NULL, daddr, saddr, |
596 | dport, sport, IPPROTO_TCP, tos, oif: ifindex); |
597 | if (IS_ERR(ptr: rt)) |
598 | return NULL; |
599 | |
600 | return rt; |
601 | } |
602 | |
603 | static struct cxgbi_sock * |
604 | cxgbi_check_route(struct sockaddr *dst_addr, int ifindex) |
605 | { |
606 | struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr; |
607 | struct dst_entry *dst; |
608 | struct net_device *ndev; |
609 | struct cxgbi_device *cdev; |
610 | struct rtable *rt = NULL; |
611 | struct neighbour *n; |
612 | struct flowi4 fl4; |
613 | struct cxgbi_sock *csk = NULL; |
614 | unsigned int mtu = 0; |
615 | int port = 0xFFFF; |
616 | int err = 0; |
617 | |
618 | rt = find_route_ipv4(fl4: &fl4, saddr: 0, daddr: daddr->sin_addr.s_addr, sport: 0, |
619 | dport: daddr->sin_port, tos: 0, ifindex); |
620 | if (!rt) { |
621 | pr_info("no route to ipv4 0x%x, port %u.\n" , |
622 | be32_to_cpu(daddr->sin_addr.s_addr), |
623 | be16_to_cpu(daddr->sin_port)); |
624 | err = -ENETUNREACH; |
625 | goto err_out; |
626 | } |
627 | dst = &rt->dst; |
628 | n = dst_neigh_lookup(dst, daddr: &daddr->sin_addr.s_addr); |
629 | if (!n) { |
630 | err = -ENODEV; |
631 | goto rel_rt; |
632 | } |
633 | ndev = n->dev; |
634 | |
635 | if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { |
636 | pr_info("multi-cast route %pI4, port %u, dev %s.\n" , |
637 | &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), |
638 | ndev->name); |
639 | err = -ENETUNREACH; |
640 | goto rel_neigh; |
641 | } |
642 | |
643 | if (ndev->flags & IFF_LOOPBACK) { |
644 | ndev = ip_dev_find(net: &init_net, addr: daddr->sin_addr.s_addr); |
645 | if (!ndev) { |
646 | err = -ENETUNREACH; |
647 | goto rel_neigh; |
648 | } |
649 | mtu = ndev->mtu; |
650 | pr_info("rt dev %s, loopback -> %s, mtu %u.\n" , |
651 | n->dev->name, ndev->name, mtu); |
652 | } |
653 | |
654 | if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(dev: ndev)) { |
655 | pr_info("%s interface not up.\n" , ndev->name); |
656 | err = -ENETDOWN; |
657 | goto rel_neigh; |
658 | } |
659 | |
660 | cdev = cxgbi_device_find_by_netdev(ndev, &port); |
661 | if (!cdev) |
662 | cdev = cxgbi_device_find_by_mac(ndev, port: &port); |
663 | if (!cdev) { |
664 | pr_info("dst %pI4, %s, NOT cxgbi device.\n" , |
665 | &daddr->sin_addr.s_addr, ndev->name); |
666 | err = -ENETUNREACH; |
667 | goto rel_neigh; |
668 | } |
669 | log_debug(1 << CXGBI_DBG_SOCK, |
670 | "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n" , |
671 | &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), |
672 | port, ndev->name, cdev); |
673 | |
674 | csk = cxgbi_sock_create(cdev); |
675 | if (!csk) { |
676 | err = -ENOMEM; |
677 | goto rel_neigh; |
678 | } |
679 | csk->cdev = cdev; |
680 | csk->port_id = port; |
681 | csk->mtu = mtu; |
682 | csk->dst = dst; |
683 | |
684 | csk->csk_family = AF_INET; |
685 | csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; |
686 | csk->daddr.sin_port = daddr->sin_port; |
687 | csk->daddr.sin_family = daddr->sin_family; |
688 | csk->saddr.sin_family = daddr->sin_family; |
689 | csk->saddr.sin_addr.s_addr = fl4.saddr; |
690 | neigh_release(neigh: n); |
691 | |
692 | return csk; |
693 | |
694 | rel_neigh: |
695 | neigh_release(neigh: n); |
696 | |
697 | rel_rt: |
698 | ip_rt_put(rt); |
699 | err_out: |
700 | return ERR_PTR(error: err); |
701 | } |
702 | |
703 | #if IS_ENABLED(CONFIG_IPV6) |
704 | static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, |
705 | const struct in6_addr *daddr, |
706 | int ifindex) |
707 | { |
708 | struct flowi6 fl; |
709 | |
710 | memset(&fl, 0, sizeof(fl)); |
711 | fl.flowi6_oif = ifindex; |
712 | if (saddr) |
713 | memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); |
714 | if (daddr) |
715 | memcpy(&fl.daddr, daddr, sizeof(struct in6_addr)); |
716 | return (struct rt6_info *)ip6_route_output(net: &init_net, NULL, fl6: &fl); |
717 | } |
718 | |
719 | static struct cxgbi_sock * |
720 | cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex) |
721 | { |
722 | struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr; |
723 | struct dst_entry *dst; |
724 | struct net_device *ndev; |
725 | struct cxgbi_device *cdev; |
726 | struct rt6_info *rt = NULL; |
727 | struct neighbour *n; |
728 | struct in6_addr pref_saddr; |
729 | struct cxgbi_sock *csk = NULL; |
730 | unsigned int mtu = 0; |
731 | int port = 0xFFFF; |
732 | int err = 0; |
733 | |
734 | rt = find_route_ipv6(NULL, daddr: &daddr6->sin6_addr, ifindex); |
735 | |
736 | if (!rt) { |
737 | pr_info("no route to ipv6 %pI6 port %u\n" , |
738 | daddr6->sin6_addr.s6_addr, |
739 | be16_to_cpu(daddr6->sin6_port)); |
740 | err = -ENETUNREACH; |
741 | goto err_out; |
742 | } |
743 | |
744 | dst = &rt->dst; |
745 | |
746 | n = dst_neigh_lookup(dst, daddr: &daddr6->sin6_addr); |
747 | |
748 | if (!n) { |
749 | pr_info("%pI6, port %u, dst no neighbour.\n" , |
750 | daddr6->sin6_addr.s6_addr, |
751 | be16_to_cpu(daddr6->sin6_port)); |
752 | err = -ENETUNREACH; |
753 | goto rel_rt; |
754 | } |
755 | ndev = n->dev; |
756 | |
757 | if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(dev: ndev)) { |
758 | pr_info("%s interface not up.\n" , ndev->name); |
759 | err = -ENETDOWN; |
760 | goto rel_rt; |
761 | } |
762 | |
763 | if (ipv6_addr_is_multicast(addr: &daddr6->sin6_addr)) { |
764 | pr_info("multi-cast route %pI6 port %u, dev %s.\n" , |
765 | daddr6->sin6_addr.s6_addr, |
766 | ntohs(daddr6->sin6_port), ndev->name); |
767 | err = -ENETUNREACH; |
768 | goto rel_rt; |
769 | } |
770 | |
771 | cdev = cxgbi_device_find_by_netdev(ndev, &port); |
772 | if (!cdev) |
773 | cdev = cxgbi_device_find_by_mac(ndev, port: &port); |
774 | if (!cdev) { |
775 | pr_info("dst %pI6 %s, NOT cxgbi device.\n" , |
776 | daddr6->sin6_addr.s6_addr, ndev->name); |
777 | err = -ENETUNREACH; |
778 | goto rel_rt; |
779 | } |
780 | log_debug(1 << CXGBI_DBG_SOCK, |
781 | "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n" , |
782 | daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port, |
783 | ndev->name, cdev); |
784 | |
785 | csk = cxgbi_sock_create(cdev); |
786 | if (!csk) { |
787 | err = -ENOMEM; |
788 | goto rel_rt; |
789 | } |
790 | csk->cdev = cdev; |
791 | csk->port_id = port; |
792 | csk->mtu = mtu; |
793 | csk->dst = dst; |
794 | |
795 | rt6_get_prefsrc(rt, addr: &pref_saddr); |
796 | if (ipv6_addr_any(a: &pref_saddr)) { |
797 | struct inet6_dev *idev = ip6_dst_idev(dst: (struct dst_entry *)rt); |
798 | |
799 | err = ipv6_dev_get_saddr(net: &init_net, dev: idev ? idev->dev : NULL, |
800 | daddr: &daddr6->sin6_addr, srcprefs: 0, saddr: &pref_saddr); |
801 | if (err) { |
802 | pr_info("failed to get source address to reach %pI6\n" , |
803 | &daddr6->sin6_addr); |
804 | goto rel_rt; |
805 | } |
806 | } |
807 | |
808 | csk->csk_family = AF_INET6; |
809 | csk->daddr6.sin6_addr = daddr6->sin6_addr; |
810 | csk->daddr6.sin6_port = daddr6->sin6_port; |
811 | csk->daddr6.sin6_family = daddr6->sin6_family; |
812 | csk->saddr6.sin6_family = daddr6->sin6_family; |
813 | csk->saddr6.sin6_addr = pref_saddr; |
814 | |
815 | neigh_release(neigh: n); |
816 | return csk; |
817 | |
818 | rel_rt: |
819 | if (n) |
820 | neigh_release(neigh: n); |
821 | |
822 | ip6_rt_put(rt); |
823 | if (csk) |
824 | cxgbi_sock_closed(csk); |
825 | err_out: |
826 | return ERR_PTR(error: err); |
827 | } |
828 | #endif /* IS_ENABLED(CONFIG_IPV6) */ |
829 | |
830 | void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, |
831 | unsigned int opt) |
832 | { |
833 | csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; |
834 | dst_confirm(dst: csk->dst); |
835 | smp_mb(); |
836 | cxgbi_sock_set_state(csk, state: CTP_ESTABLISHED); |
837 | } |
838 | EXPORT_SYMBOL_GPL(cxgbi_sock_established); |
839 | |
840 | static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) |
841 | { |
842 | log_debug(1 << CXGBI_DBG_SOCK, |
843 | "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n" , |
844 | csk, csk->state, csk->flags, csk->user_data); |
845 | |
846 | if (csk->state != CTP_ESTABLISHED) { |
847 | read_lock_bh(&csk->callback_lock); |
848 | if (csk->user_data) |
849 | iscsi_conn_failure(conn: csk->user_data, |
850 | err: ISCSI_ERR_TCP_CONN_CLOSE); |
851 | read_unlock_bh(&csk->callback_lock); |
852 | } |
853 | } |
854 | |
855 | void cxgbi_sock_closed(struct cxgbi_sock *csk) |
856 | { |
857 | log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n" , |
858 | csk, (csk)->state, (csk)->flags, (csk)->tid); |
859 | cxgbi_sock_set_flag(csk, flag: CTPF_ACTIVE_CLOSE_NEEDED); |
860 | if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) |
861 | return; |
862 | if (csk->saddr.sin_port) |
863 | sock_put_port(csk); |
864 | if (csk->dst) |
865 | dst_release(dst: csk->dst); |
866 | csk->cdev->csk_release_offload_resources(csk); |
867 | cxgbi_sock_set_state(csk, state: CTP_CLOSED); |
868 | cxgbi_inform_iscsi_conn_closing(csk); |
869 | cxgbi_sock_put(csk); |
870 | } |
871 | EXPORT_SYMBOL_GPL(cxgbi_sock_closed); |
872 | |
873 | static void need_active_close(struct cxgbi_sock *csk) |
874 | { |
875 | int data_lost; |
876 | int close_req = 0; |
877 | |
878 | log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n" , |
879 | csk, (csk)->state, (csk)->flags, (csk)->tid); |
880 | spin_lock_bh(lock: &csk->lock); |
881 | if (csk->dst) |
882 | dst_confirm(dst: csk->dst); |
883 | data_lost = skb_queue_len(list_: &csk->receive_queue); |
884 | __skb_queue_purge(list: &csk->receive_queue); |
885 | |
886 | if (csk->state == CTP_ACTIVE_OPEN) |
887 | cxgbi_sock_set_flag(csk, flag: CTPF_ACTIVE_CLOSE_NEEDED); |
888 | else if (csk->state == CTP_ESTABLISHED) { |
889 | close_req = 1; |
890 | cxgbi_sock_set_state(csk, state: CTP_ACTIVE_CLOSE); |
891 | } else if (csk->state == CTP_PASSIVE_CLOSE) { |
892 | close_req = 1; |
893 | cxgbi_sock_set_state(csk, state: CTP_CLOSE_WAIT_2); |
894 | } |
895 | |
896 | if (close_req) { |
897 | if (!cxgbi_sock_flag(csk, flag: CTPF_LOGOUT_RSP_RCVD) || |
898 | data_lost) |
899 | csk->cdev->csk_send_abort_req(csk); |
900 | else |
901 | csk->cdev->csk_send_close_req(csk); |
902 | } |
903 | |
904 | spin_unlock_bh(lock: &csk->lock); |
905 | } |
906 | |
907 | void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) |
908 | { |
909 | pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n" , |
910 | csk, csk->state, csk->flags, |
911 | &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, |
912 | &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, |
913 | errno); |
914 | |
915 | cxgbi_sock_set_state(csk, state: CTP_CONNECTING); |
916 | csk->err = errno; |
917 | cxgbi_sock_closed(csk); |
918 | } |
919 | EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); |
920 | |
921 | void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) |
922 | { |
923 | struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; |
924 | struct module *owner = csk->cdev->owner; |
925 | |
926 | log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n" , |
927 | csk, (csk)->state, (csk)->flags, (csk)->tid); |
928 | cxgbi_sock_get(csk); |
929 | spin_lock_bh(lock: &csk->lock); |
930 | if (csk->state == CTP_ACTIVE_OPEN) |
931 | cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); |
932 | spin_unlock_bh(lock: &csk->lock); |
933 | cxgbi_sock_put(csk); |
934 | __kfree_skb(skb); |
935 | |
936 | module_put(module: owner); |
937 | } |
938 | EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); |
939 | |
940 | void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) |
941 | { |
942 | cxgbi_sock_get(csk); |
943 | spin_lock_bh(lock: &csk->lock); |
944 | |
945 | cxgbi_sock_set_flag(csk, flag: CTPF_ABORT_RPL_RCVD); |
946 | if (cxgbi_sock_flag(csk, flag: CTPF_ABORT_RPL_PENDING)) { |
947 | cxgbi_sock_clear_flag(csk, flag: CTPF_ABORT_RPL_PENDING); |
948 | if (cxgbi_sock_flag(csk, flag: CTPF_ABORT_REQ_RCVD)) |
949 | pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n" , |
950 | csk, csk->state, csk->flags, csk->tid); |
951 | cxgbi_sock_closed(csk); |
952 | } |
953 | |
954 | spin_unlock_bh(lock: &csk->lock); |
955 | cxgbi_sock_put(csk); |
956 | } |
957 | EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl); |
958 | |
959 | void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) |
960 | { |
961 | log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n" , |
962 | csk, (csk)->state, (csk)->flags, (csk)->tid); |
963 | cxgbi_sock_get(csk); |
964 | spin_lock_bh(lock: &csk->lock); |
965 | |
966 | if (cxgbi_sock_flag(csk, flag: CTPF_ABORT_RPL_PENDING)) |
967 | goto done; |
968 | |
969 | switch (csk->state) { |
970 | case CTP_ESTABLISHED: |
971 | cxgbi_sock_set_state(csk, state: CTP_PASSIVE_CLOSE); |
972 | break; |
973 | case CTP_ACTIVE_CLOSE: |
974 | cxgbi_sock_set_state(csk, state: CTP_CLOSE_WAIT_2); |
975 | break; |
976 | case CTP_CLOSE_WAIT_1: |
977 | cxgbi_sock_closed(csk); |
978 | break; |
979 | case CTP_ABORTING: |
980 | break; |
981 | default: |
982 | pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n" , |
983 | csk, csk->state, csk->flags, csk->tid); |
984 | } |
985 | cxgbi_inform_iscsi_conn_closing(csk); |
986 | done: |
987 | spin_unlock_bh(lock: &csk->lock); |
988 | cxgbi_sock_put(csk); |
989 | } |
990 | EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close); |
991 | |
/*
 * cxgbi_sock_rcv_close_conn_rpl - handle the hw reply to our close request
 * @csk: offloaded iscsi tcp connection
 * @snd_nxt: hardware's next send sequence number at close time
 *
 * Record the final send-window position and advance the active-close
 * state machine.  Skipped entirely if an abort reply is still pending,
 * since the abort path then owns connection teardown.
 */
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n" ,
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(lock: &csk->lock);

	/* everything up to snd_nxt - 1 has been taken by the hw */
	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, flag: CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		/* our close is acked; now wait for the peer's close */
		cxgbi_sock_set_state(csk, state: CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		/* peer side already closed: teardown is complete */
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		/* abort path will clean up; ignore */
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n" ,
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(lock: &csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
1022 | |
/*
 * cxgbi_sock_rcv_wr_ack - process a hardware work-request completion
 * @csk: offloaded iscsi tcp connection
 * @credits: number of tx credits returned by the hardware
 * @snd_una: updated unacknowledged sequence number from the hardware
 * @seq_chk: non-zero if @snd_una is valid and should be applied
 *
 * Return the credits to the connection's pool, retire completed entries
 * from the pending write-request queue, advance snd_una, and kick the
 * tx path if more data is waiting to be sent.
 */
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n" ,
			csk, csk->state, csk->flags, csk->tid, credits,
			csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(lock: &csk->lock);

	csk->wr_cred += credits;
	/* clamp unacked credits so cred + una never exceeds the maximum */
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	/* retire completed wrs; skb->csum holds each wr's credit count */
	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			/* more credits returned than wrs outstanding */
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n" ,
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			/* partial completion: leave the wr queued with its
			 * remaining credit count
			 */
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n" ,
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(skb: p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		/* snd_una must never move backwards */
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u." ,
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			/* forward progress confirms the neighbour entry */
			dst_confirm(dst: csk->dst);
		}
	}

	/* push any queued data; open the tx path to libiscsi either way */
	if (skb_queue_len(list_: &csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(lock: &csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
1086 | |
1087 | static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, |
1088 | unsigned short mtu) |
1089 | { |
1090 | int i = 0; |
1091 | |
1092 | while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) |
1093 | ++i; |
1094 | |
1095 | return i; |
1096 | } |
1097 | |
/*
 * cxgbi_sock_select_mss - choose the tx mss and matching hw mtu index
 * @csk: offloaded iscsi tcp connection
 * @pmtu: path mtu of the route
 *
 * Derive the advertised mss from the route metric, clamp it between the
 * device's smallest mtu and the path mtu (the constant 40 presumably
 * accounts for ip + tcp headers -- TODO confirm), then return the index
 * of the best matching entry in the device mtu table.
 */
unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric_advmss(dst);

	/* clamp between the path mtu and the device's minimum mtu */
	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, mtu: csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
1114 | |
/*
 * cxgbi_sock_skb_entail - stamp an skb with the tx sequence and queue it
 * @csk: offloaded iscsi tcp connection
 * @skb: packet to append to the write queue
 *
 * NOTE(review): uses the unlocked __skb_queue_tail, so the caller is
 * presumably expected to hold csk->lock -- confirm at call sites.
 */
void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(list: &csk->write_queue, newsk: skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
1121 | |
/*
 * cxgbi_sock_purge_wr_queue - drop every pending write request
 * @csk: offloaded iscsi tcp connection
 *
 * Dequeue and free each skb left on the pending-wr queue, e.g. while
 * tearing the connection down.
 */
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	for (;;) {
		struct sk_buff *skb = cxgbi_sock_dequeue_wr(csk);

		if (!skb)
			break;
		kfree_skb(skb);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
1130 | |
/*
 * cxgbi_sock_check_wr_invariants - sanity-check tx credit accounting
 * @csk: offloaded iscsi tcp connection
 *
 * The credits still available plus the credits consumed by queued write
 * requests must equal the per-connection maximum; log an error (but
 * carry on) if the invariant is broken.
 */
void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n" ,
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
1140 | |
/*
 * scmd_get_params - extract (sgl, nents, length) from a scsi command
 * @sc: scsi command
 * @sgl: out: scatterlist of the selected data buffer
 * @sgcnt: out: number of scatterlist entries
 * @dlen: out: total byte length of the buffer
 * @prot: non-zero selects the protection-information buffer
 */
static inline void
scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
		unsigned int *sgcnt, unsigned int *dlen,
		unsigned int prot)
{
	struct scsi_data_buffer *sdb = prot ? scsi_prot(cmd: sc) : &sc->sdb;

	*sgl = sdb->table.sgl;
	*sgcnt = sdb->table.nents;
	*dlen = sdb->length;
	/* Caution: for protection sdb, sdb->length is invalid */
}
1153 | |
1154 | void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod, |
1155 | struct cxgbi_task_tag_info *ttinfo, |
1156 | struct scatterlist **sg_pp, unsigned int *sg_off) |
1157 | { |
1158 | struct scatterlist *sg = sg_pp ? *sg_pp : NULL; |
1159 | unsigned int offset = sg_off ? *sg_off : 0; |
1160 | dma_addr_t addr = 0UL; |
1161 | unsigned int len = 0; |
1162 | int i; |
1163 | |
1164 | memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); |
1165 | |
1166 | if (sg) { |
1167 | addr = sg_dma_address(sg); |
1168 | len = sg_dma_len(sg); |
1169 | } |
1170 | |
1171 | for (i = 0; i < PPOD_PAGES_MAX; i++) { |
1172 | if (sg) { |
1173 | ppod->addr[i] = cpu_to_be64(addr + offset); |
1174 | offset += PAGE_SIZE; |
1175 | if (offset == (len + sg->offset)) { |
1176 | offset = 0; |
1177 | sg = sg_next(sg); |
1178 | if (sg) { |
1179 | addr = sg_dma_address(sg); |
1180 | len = sg_dma_len(sg); |
1181 | } |
1182 | } |
1183 | } else { |
1184 | ppod->addr[i] = 0ULL; |
1185 | } |
1186 | } |
1187 | |
1188 | /* |
1189 | * the fifth address needs to be repeated in the next ppod, so do |
1190 | * not move sg |
1191 | */ |
1192 | if (sg_pp) { |
1193 | *sg_pp = sg; |
1194 | *sg_off = offset; |
1195 | } |
1196 | |
1197 | if (offset == len) { |
1198 | offset = 0; |
1199 | sg = sg_next(sg); |
1200 | if (sg) { |
1201 | addr = sg_dma_address(sg); |
1202 | len = sg_dma_len(sg); |
1203 | } |
1204 | } |
1205 | ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL; |
1206 | } |
1207 | EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod); |
1208 | |
1209 | /* |
1210 | * APIs interacting with open-iscsi libraries |
1211 | */ |
1212 | |
/*
 * cxgbi_ddp_ppm_setup - initialize the page-pod manager for a device
 * @ppm_pp: out: the ppm instance created by cxgbi_ppm_init()
 * @cdev: cxgbi device
 * @tformat: ddp tag format description
 * @iscsi_size: size of the iscsi region
 * @llimit: lower limit of the region
 * @start: starting ppod index
 * @rsvd_factor: reservation factor passed through to the ppm
 * @edram_start: start of the edram region
 * @edram_size: size of the edram region
 *
 * Returns 0 on success (possibly with ddp disabled on the device), or
 * the negative error returned by cxgbi_ppm_init().
 */
int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
			struct cxgbi_tag_format *tformat,
			unsigned int iscsi_size, unsigned int llimit,
			unsigned int start, unsigned int rsvd_factor,
			unsigned int edram_start, unsigned int edram_size)
{
	int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
				cdev->lldev, tformat, iscsi_size, llimit, start,
				rsvd_factor, edram_start, edram_size);

	if (err >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);

		/* too few ppods or no usable page-size index: ddp is not
		 * worthwhile, fall back to non-offloaded rx
		 */
		if (ppm->ppmax < 1024 ||
		    ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX)
			cdev->flags |= CXGBI_FLAG_DDP_OFF;
		err = 0;
	} else {
		cdev->flags |= CXGBI_FLAG_DDP_OFF;
	}

	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
1237 | |
1238 | static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents) |
1239 | { |
1240 | int i; |
1241 | int last_sgidx = nents - 1; |
1242 | struct scatterlist *sg = sgl; |
1243 | |
1244 | for (i = 0; i < nents; i++, sg = sg_next(sg)) { |
1245 | unsigned int len = sg->length + sg->offset; |
1246 | |
1247 | if ((sg->offset & 0x3) || (i && sg->offset) || |
1248 | ((i != last_sgidx) && len != PAGE_SIZE)) { |
1249 | log_debug(1 << CXGBI_DBG_DDP, |
1250 | "sg %u/%u, %u,%u, not aligned.\n" , |
1251 | i, nents, sg->offset, sg->length); |
1252 | goto err_out; |
1253 | } |
1254 | } |
1255 | return 0; |
1256 | err_out: |
1257 | return -EINVAL; |
1258 | } |
1259 | |
/*
 * cxgbi_ddp_reserve - set up direct data placement for a read task
 * @cconn: cxgbi connection
 * @tdata: per-task data holding the sgl and tag info
 * @sw_tag: software tag (itt + session age) associated with the ddp tag
 * @xferlen: expected data transfer length
 *
 * Reserve page-pods for the task's buffer, dma-map the scatterlist and
 * program (or arm for later programming via the offload queue) the
 * ppods so the hw can place rx data directly into the scsi buffers.
 * Returns 0 on success or a negative errno if ddp cannot be used for
 * this task; the caller then falls back to a non-ddp tag.
 */
static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn,
			     struct cxgbi_task_data *tdata, u32 sw_tag,
			     unsigned int xferlen)
{
	struct cxgbi_sock *csk = cconn->cep->csk;
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;
	int err;

	if (cdev->flags & CXGBI_FLAG_DDP_OFF) {
		log_debug(1 << CXGBI_DBG_DDP,
			  "cdev 0x%p DDP off.\n" , cdev);
		return -EINVAL;
	}

	/* small transfers are not worth the ppod setup cost */
	if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt ||
	    ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) {
		log_debug(1 << CXGBI_DBG_DDP,
			  "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n" ,
			  ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX,
			  xferlen, ttinfo->nents);
		return -EINVAL;
	}

	/* make sure the buffer is suitable for ddp */
	if (cxgbi_ddp_sgl_check(sgl, nents: sgcnt) < 0)
		return -EINVAL;

	ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >>
			    PAGE_SHIFT;

	/*
	 * the ddp tag will be used for the itt in the outgoing pdu,
	 * the itt generated by libiscsi is saved in the ppm and can be
	 * retrieved via the ddp tag
	 */
	err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, (unsigned long)sw_tag);
	if (err < 0) {
		cconn->ddp_full++;
		return err;
	}
	ttinfo->npods = err;

	/* setup dma from scsi command sgl */
	/* map with a zero offset so the mapping starts page-aligned,
	 * then restore the caller's original offset
	 */
	sgl->offset = 0;
	err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (err == 0) {
		pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n" ,
			__func__, sw_tag, xferlen, sgcnt);
		goto rel_ppods;
	}
	if (err != ttinfo->nr_pages) {
		/* entries may have been merged by the iommu; not fatal */
		log_debug(1 << CXGBI_DBG_DDP,
			  "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n" ,
			  __func__, sw_tag, xferlen, sgcnt, err);
	}

	ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED;
	ttinfo->cid = csk->port_id;

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) {
		/* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */
		ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID;
	} else {
		/* write ppod from control queue now */
		err = cdev->csk_ddp_set_map(ppm, csk, ttinfo);
		if (err < 0)
			goto rel_ppods;
	}

	return 0;

rel_ppods:
	/* unwind: give the ppods back and undo the dma mapping */
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

	if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) {
		ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED;
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	}
	return -EINVAL;
}
1350 | |
/*
 * task_release_itt - release ddp resources tied to a task's itt
 * @task: iscsi task being cleaned up
 * @hdr_itt: itt (wire byte order) carried by the task's pdus
 *
 * If the tag was a ddp tag for a read command, clear the hw mapping
 * (unless it was programmed via the offload queue), release the ppods
 * and undo the dma mapping of the task's scatterlist.
 */
static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 tag = ntohl((__force u32)hdr_itt);

	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, task 0x%p, release tag 0x%x.\n" ,
		  cdev, task, tag);
	if (sc && sc->sc_data_direction == DMA_FROM_DEVICE &&
	    cxgbi_ppm_is_ddp_tag(ppm, tag)) {
		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;

		/* offload-queue ppods are invalidated by the hw itself */
		if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ))
			cdev->csk_ddp_clear_map(cdev, ppm, ttinfo);
		cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
		dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
			     DMA_FROM_DEVICE);
	}
}
1375 | |
1376 | static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age) |
1377 | { |
1378 | /* assume idx and age both are < 0x7FFF (32767) */ |
1379 | return (idx << 16) | age; |
1380 | } |
1381 | |
/*
 * task_reserve_itt - choose the on-wire itt (ddp tag if possible)
 * @task: iscsi task being set up
 * @hdr_itt: out: itt to place in the pdu header (wire byte order)
 *
 * For read commands, try to reserve ddp resources and use the resulting
 * ddp tag as the itt; otherwise encode the software tag as a non-ddp
 * tag.  Returns 0 on success or the negative error from the non-ddp
 * tag encoding.
 */
static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 sw_tag = cxgbi_build_sw_tag(idx: task->itt, age: sess->age);
	u32 tag = 0;
	int err = -EINVAL;

	if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) {
		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;

		scmd_get_params(sc, sgl: &ttinfo->sgl, sgcnt: &ttinfo->nents,
				dlen: &tdata->dlen, prot: 0);
		err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, xferlen: tdata->dlen);
		if (!err)
			tag = ttinfo->tag;
		else
			log_debug(1 << CXGBI_DBG_DDP,
				  "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n" ,
				  cconn->cep->csk, task, tdata->dlen,
				  ttinfo->nents);
	}

	/* ddp unavailable or not a read: fall back to a non-ddp tag */
	if (err < 0) {
		err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag);
		if (err < 0)
			return err;
	}
	/* the itt need to sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n" ,
		  cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}
1424 | |
/*
 * cxgbi_parse_pdu_itt - recover the libiscsi task index and session age
 * @conn: iscsi connection
 * @itt: itt from the received pdu (wire byte order)
 * @idx: out (optional): libiscsi task index
 * @age: out (optional): session age
 *
 * The on-wire tag may be either a ddp tag (sw bits stored in the ppm)
 * or a directly-encoded non-ddp tag; decode accordingly.
 */
void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 tag = ntohl((__force u32)itt);
	u32 sw_bits;

	if (ppm) {
		if (cxgbi_ppm_is_ddp_tag(ppm, tag))
			sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag);
		else
			sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag);
	} else {
		/* no ppm: the tag is the raw software tag */
		sw_bits = tag;
	}

	cxgbi_decode_sw_tag(sw_tag: sw_bits, idx, age);
	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n" ,
		  cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
		  age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
1450 | |
1451 | void cxgbi_conn_tx_open(struct cxgbi_sock *csk) |
1452 | { |
1453 | struct iscsi_conn *conn = csk->user_data; |
1454 | |
1455 | if (conn) { |
1456 | log_debug(1 << CXGBI_DBG_SOCK, |
1457 | "csk 0x%p, cid %d.\n" , csk, conn->id); |
1458 | iscsi_conn_queue_xmit(conn); |
1459 | } |
1460 | } |
1461 | EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); |
1462 | |
1463 | /* |
1464 | * pdu receive, interact with libiscsi_tcp |
1465 | */ |
/*
 * read_pdu_skb - feed one skb into libiscsi_tcp's pdu state machine
 * @conn: iscsi connection
 * @skb: received skb
 * @offset: byte offset into @skb where the data starts
 * @offloaded: non-zero if the payload was already ddp'ed into the
 *	scsi buffers (libiscsi then skips the data copy)
 *
 * Returns the number of bytes consumed, or a negative errno on a
 * protocol/connection error.
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, status: &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n" ,
			  skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n" ,
			skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n" ,
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb." );
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n" ,
			skb, offset, offloaded, bytes_read);
		return bytes_read;
	default:
		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n" ,
			skb, offset, offloaded, status);
		return -EINVAL;
	}
}
1506 | |
/*
 * skb_read_pdu_bhs - process a pdu's basic header segment
 * @csk: offloaded iscsi tcp connection
 * @conn: iscsi connection
 * @skb: skb carrying the pdu header
 *
 * Validate that libiscsi expects a header, check the hw header-digest
 * result, patch exp_datasn for ddp'ed completion pdus, then hand the
 * header to libiscsi_tcp.  Returns bytes consumed or negative errno.
 */
static int
skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
		 struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int err;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n" ,
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	/* rx state machine out of sync with the hw pdu boundaries */
	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		pr_info("conn 0x%p, skb 0x%p, not hdr.\n" , conn, skb);
		iscsi_conn_failure(conn, err: ISCSI_ERR_PROTO);
		return -EIO;
	}

	/* hw flagged a header CRC error on this pdu */
	if (conn->hdrdgst_en &&
	    cxgbi_skcb_test_flag(skb, flag: SKCBF_RX_HCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, hcrc.\n" , conn, skb);
		iscsi_conn_failure(conn, err: ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	if (cxgbi_skcb_test_flag(skb, flag: SKCBF_RX_ISCSI_COMPL) &&
	    cxgbi_skcb_test_flag(skb, flag: SKCBF_RX_DATA_DDPD)) {
		/* If completion flag is set and data is directly
		 * placed in to the host memory then update
		 * task->exp_datasn to the datasn in completion
		 * iSCSI hdr as T6 adapter generates completion only
		 * for the last pdu of a sequence.
		 */
		itt_t itt = ((struct iscsi_data *)skb->data)->itt;
		struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
		u32 data_sn = be32_to_cpu(((struct iscsi_data *)
							skb->data)->datasn);
		if (task && task->sc) {
			struct iscsi_tcp_task *tcp_task = task->dd_data;

			tcp_task->exp_datasn = data_sn;
		}
	}

	err = read_pdu_skb(conn, skb, offset: 0, offloaded: 0);
	if (likely(err >= 0)) {
		struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
		u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;

		/* remember a logout response so close can be handled */
		if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
			cxgbi_sock_set_flag(csk, flag: CTPF_LOGOUT_RSP_RCVD);
	}

	return err;
}
1561 | |
1562 | static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, |
1563 | struct sk_buff *skb, unsigned int offset) |
1564 | { |
1565 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1566 | bool offloaded = 0; |
1567 | int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; |
1568 | |
1569 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1570 | "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n" , |
1571 | conn, skb, skb->len, cxgbi_skcb_flags(skb)); |
1572 | |
1573 | if (conn->datadgst_en && |
1574 | cxgbi_skcb_test_flag(skb: lskb, flag: SKCBF_RX_DCRC_ERR)) { |
1575 | pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n" , |
1576 | conn, lskb, cxgbi_skcb_flags(lskb)); |
1577 | iscsi_conn_failure(conn, err: ISCSI_ERR_DATA_DGST); |
1578 | return -EIO; |
1579 | } |
1580 | |
1581 | if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) |
1582 | return 0; |
1583 | |
1584 | /* coalesced, add header digest length */ |
1585 | if (lskb == skb && conn->hdrdgst_en) |
1586 | offset += ISCSI_DIGEST_SIZE; |
1587 | |
1588 | if (cxgbi_skcb_test_flag(skb: lskb, flag: SKCBF_RX_DATA_DDPD)) |
1589 | offloaded = 1; |
1590 | |
1591 | if (opcode == ISCSI_OP_SCSI_DATA_IN) |
1592 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1593 | "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n" , |
1594 | skb, opcode, ntohl(tcp_conn->in.hdr->itt), |
1595 | tcp_conn->in.datalen, offloaded ? "is" : "not" ); |
1596 | |
1597 | return read_pdu_skb(conn, skb, offset, offloaded); |
1598 | } |
1599 | |
/*
 * csk_return_rx_credits - return consumed rx window credits to the hw
 * @csk: offloaded iscsi tcp connection
 * @copied: bytes just consumed (for logging; the credit math is based
 *	on copied_seq vs rcv_wup)
 *
 * To avoid a per-pdu credit return, credits are batched: they are only
 * pushed to the hw once they exceed the device threshold, or urgently
 * when the remaining receive window drops within 16KB of closing.
 */
static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
{
	struct cxgbi_device *cdev = csk->cdev;
	int must_send;
	u32 credits;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n" ,
		csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
		csk->rcv_wup, cdev->rx_credit_thres,
		csk->rcv_win);

	/* device does not do rx credit returns */
	if (!cdev->rx_credit_thres)
		return;

	if (csk->state != CTP_ESTABLISHED)
		return;

	credits = csk->copied_seq - csk->rcv_wup;
	if (unlikely(!credits))
		return;
	/* window nearly closed (within 16KB): return credits now */
	must_send = credits + 16384 >= csk->rcv_win;
	if (must_send || credits >= cdev->rx_credit_thres)
		csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
}
1625 | |
/*
 * cxgbi_conn_pdu_ready - drain completed pdus from the rx queue
 * @csk: offloaded iscsi tcp connection
 *
 * Walk the receive queue and hand each fully-received pdu to libiscsi.
 * A pdu arrives either as one coalesced skb (header + data), as a
 * header skb followed by a separate data skb, or as a header-only skb.
 * Consumed bytes are accumulated and returned to the hw as rx credits;
 * any error fails the iscsi connection.
 */
void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct iscsi_conn *conn = csk->user_data;
	struct sk_buff *skb;
	unsigned int read = 0;
	int err = 0;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, conn 0x%p.\n" , csk, conn);

	/* connection gone or rx suspended: leave pdus queued */
	if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n" ,
			csk, conn, conn ? conn->id : 0xFF,
			conn ? conn->flags : 0xFF);
		return;
	}

	while (!err) {
		skb = skb_peek(list_: &csk->receive_queue);
		/* stop at the first pdu the hw has not fully delivered */
		if (!skb ||
		    !(cxgbi_skcb_test_flag(skb, flag: SKCBF_RX_STATUS))) {
			if (skb)
				log_debug(1 << CXGBI_DBG_PDU_RX,
					"skb 0x%p, NOT ready 0x%lx.\n" ,
					skb, cxgbi_skcb_flags(skb));
			break;
		}
		__skb_unlink(skb, list: &csk->receive_queue);

		read += cxgbi_skcb_rx_pdulen(skb);
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n" ,
			csk, skb, skb->len, cxgbi_skcb_flags(skb),
			cxgbi_skcb_rx_pdulen(skb));

		if (cxgbi_skcb_test_flag(skb, flag: SKCBF_RX_COALESCED)) {
			/* header and data share one skb; data follows the
			 * bhs at skb_rx_extra (+ possible header digest)
			 */
			err = skb_read_pdu_bhs(csk, conn, skb);
			if (err < 0) {
				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n" ,
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}
			err = skb_read_pdu_data(conn, lskb: skb, skb,
						offset: err + cdev->skb_rx_extra);
			if (err < 0)
				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n" ,
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
		} else {
			err = skb_read_pdu_bhs(csk, conn, skb);
			if (err < 0) {
				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n" ,
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}

			if (cxgbi_skcb_test_flag(skb, flag: SKCBF_RX_DATA)) {
				/* data arrives in the next queued skb */
				struct sk_buff *dskb;

				dskb = skb_peek(list_: &csk->receive_queue);
				if (!dskb) {
					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
						" plen %u, NO data.\n" ,
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb));
					err = -EIO;
					goto skb_done;
				}
				__skb_unlink(skb: dskb, list: &csk->receive_queue);

				err = skb_read_pdu_data(conn, lskb: skb, skb: dskb, offset: 0);
				if (err < 0)
					pr_err("data, csk 0x%p, skb 0x%p,%u, "
						"f 0x%lx, plen %u, dskb 0x%p,"
						"%u.\n" ,
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb),
						dskb, dskb->len);
				__kfree_skb(skb: dskb);
			} else
				err = skb_read_pdu_data(conn, lskb: skb, skb, offset: 0);
		}
skb_done:
		__kfree_skb(skb);

		if (err < 0)
			break;
	}

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n" , csk, read);
	if (read) {
		csk->copied_seq += read;
		csk_return_rx_credits(csk, copied: read);
		conn->rxdata_octets += read;
	}

	if (err < 0) {
		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n" ,
			csk, conn, err, read);
		iscsi_conn_failure(conn, err: ISCSI_ERR_CONN_FAILED);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
1741 | |
1742 | static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, |
1743 | unsigned int offset, unsigned int *off, |
1744 | struct scatterlist **sgp) |
1745 | { |
1746 | int i; |
1747 | struct scatterlist *sg; |
1748 | |
1749 | for_each_sg(sgl, sg, sgcnt, i) { |
1750 | if (offset < sg->length) { |
1751 | *off = offset; |
1752 | *sgp = sg; |
1753 | return 0; |
1754 | } |
1755 | offset -= sg->length; |
1756 | } |
1757 | return -EFAULT; |
1758 | } |
1759 | |
/*
 * sgl_read_to_frags - translate an sgl range into page fragments
 * @sg: starting scatterlist entry
 * @sgoffset: byte offset within @sg to start from
 * @dlen: number of bytes to cover
 * @frags: out: page fragment array to fill
 * @frag_max: capacity of @frags
 * @dlimit: out: on -EINVAL for overflow, how many bytes did fit
 *
 * Adjacent ranges on the same page are merged into one fragment.
 * Returns the number of fragments used, or -EINVAL if the sgl ends
 * early or more than @frag_max fragments would be needed.
 */
static int
sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
		  unsigned int dlen, struct page_frag *frags,
		  int frag_max, u32 *dlimit)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		/* current entry exhausted: move to the next one */
		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				pr_warn("sg %d NULL, len %u/%u.\n" ,
					i, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);

		}
		copy = min(datalen, sglen);
		/* contiguous with the previous frag on the same page:
		 * extend it instead of starting a new one
		 */
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				pr_warn("too many pages %u, dlen %u.\n" ,
					frag_max, dlen);
				/* report how much data did fit */
				*dlimit = dlen - datalen;
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}
1811 | |
/*
 * cxgbi_task_data_sgl_check - decide if the task's sgl can be sent
 * directly as page fragments
 * @task: iscsi task
 *
 * Marks the task data CXGBI_TASK_SGL_COPY if the pages cannot be
 * referenced directly (no sgl at all, or any page with a zero
 * refcount), in which case the tx path must copy the data instead.
 */
static void cxgbi_task_data_sgl_check(struct iscsi_task *task)
{
	struct scsi_cmnd *sc = task->sc;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scatterlist *sg, *sgl = NULL;
	u32 sgcnt = 0;
	int i;

	tdata->flags = CXGBI_TASK_SGL_CHECKED;
	if (!sc)
		return;

	scmd_get_params(sc, sgl: &sgl, sgcnt: &sgcnt, dlen: &tdata->dlen, prot: 0);
	if (!sgl || !sgcnt) {
		tdata->flags |= CXGBI_TASK_SGL_COPY;
		return;
	}

	for_each_sg(sgl, sg, sgcnt, i) {
		/* unreferenced page: cannot be handed to the tx path */
		if (page_count(page: sg_page(sg)) < 1) {
			tdata->flags |= CXGBI_TASK_SGL_COPY;
			return;
		}
	}
}
1837 | |
/*
 * cxgbi_task_data_sgl_read - map a task data range into tx fragments
 * @task: iscsi task
 * @offset: byte offset into the task's data buffer
 * @count: number of bytes to map
 * @dlimit: out: on fragment overflow, how many bytes did fit
 *
 * Seek to @offset in the scsi sgl and fill tdata->frags (at most
 * MAX_SKB_FRAGS) covering @count bytes, recording the range in tdata.
 * Returns 0 on success or a negative errno.
 */
static int
cxgbi_task_data_sgl_read(struct iscsi_task *task, u32 offset, u32 count,
			 u32 *dlimit)
{
	struct scsi_cmnd *sc = task->sc;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scatterlist *sgl = NULL;
	struct scatterlist *sg;
	u32 dlen = 0;
	u32 sgcnt;
	int err;

	/* no scsi command or no data buffer: nothing to map */
	if (!sc)
		return 0;

	scmd_get_params(sc, sgl: &sgl, sgcnt: &sgcnt, dlen: &dlen, prot: 0);
	if (!sgl || !sgcnt)
		return 0;

	err = sgl_seek_offset(sgl, sgcnt, offset, off: &tdata->sgoffset, sgp: &sg);
	if (err < 0) {
		pr_warn("tpdu max, sgl %u, bad offset %u/%u.\n" ,
			sgcnt, offset, tdata->dlen);
		return err;
	}
	err = sgl_read_to_frags(sg, sgoffset: tdata->sgoffset, dlen: count,
				frags: tdata->frags, MAX_SKB_FRAGS, dlimit);
	if (err < 0) {
		log_debug(1 << CXGBI_DBG_ISCSI,
			  "sgl max limit, sgl %u, offset %u, %u/%u, dlimit %u.\n" ,
			  sgcnt, offset, count, tdata->dlen, *dlimit);
		return err;
	}
	tdata->offset = offset;
	tdata->count = count;
	tdata->nr_frags = err;	/* sgl_read_to_frags returns frag count */
	tdata->total_count = count;
	tdata->total_offset = offset;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		  "%s: offset %u, count %u,\n"
		  "err %u, total_count %u, total_offset %u\n" ,
		  __func__, offset, count, err, tdata->total_count, tdata->total_offset);

	return 0;
}
1884 | |
1885 | int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 op) |
1886 | { |
1887 | struct iscsi_conn *conn = task->conn; |
1888 | struct iscsi_session *session = task->conn->session; |
1889 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1890 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
1891 | struct cxgbi_device *cdev = cconn->chba->cdev; |
1892 | struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL; |
1893 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
1894 | struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); |
1895 | struct scsi_cmnd *sc = task->sc; |
1896 | u32 headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; |
1897 | u32 max_txdata_len = conn->max_xmit_dlength; |
1898 | u32 iso_tx_rsvd = 0, local_iso_info = 0; |
1899 | u32 last_tdata_offset, last_tdata_count; |
1900 | int err = 0; |
1901 | |
1902 | if (!tcp_task) { |
1903 | pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p.\n" , |
1904 | task, tcp_task, tdata); |
1905 | return -ENOMEM; |
1906 | } |
1907 | if (!csk) { |
1908 | pr_err("task 0x%p, csk gone.\n" , task); |
1909 | return -EPIPE; |
1910 | } |
1911 | |
1912 | op &= ISCSI_OPCODE_MASK; |
1913 | |
1914 | tcp_task->dd_data = tdata; |
1915 | task->hdr = NULL; |
1916 | |
1917 | last_tdata_count = tdata->count; |
1918 | last_tdata_offset = tdata->offset; |
1919 | |
1920 | if ((op == ISCSI_OP_SCSI_DATA_OUT) || |
1921 | ((op == ISCSI_OP_SCSI_CMD) && |
1922 | (sc->sc_data_direction == DMA_TO_DEVICE))) { |
1923 | u32 remaining_data_tosend, dlimit = 0; |
1924 | u32 max_pdu_size, max_num_pdu, num_pdu; |
1925 | u32 count; |
1926 | |
1927 | /* Preserve conn->max_xmit_dlength because it can get updated to |
1928 | * ISO data size. |
1929 | */ |
1930 | if (task->state == ISCSI_TASK_PENDING) |
1931 | tdata->max_xmit_dlength = conn->max_xmit_dlength; |
1932 | |
1933 | if (!tdata->offset) |
1934 | cxgbi_task_data_sgl_check(task); |
1935 | |
1936 | remaining_data_tosend = |
1937 | tdata->dlen - tdata->offset - tdata->count; |
1938 | |
1939 | recalculate_sgl: |
1940 | max_txdata_len = tdata->max_xmit_dlength; |
1941 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
1942 | "tdata->dlen %u, remaining to send %u " |
1943 | "conn->max_xmit_dlength %u, " |
1944 | "tdata->max_xmit_dlength %u\n" , |
1945 | tdata->dlen, remaining_data_tosend, |
1946 | conn->max_xmit_dlength, tdata->max_xmit_dlength); |
1947 | |
1948 | if (cdev->skb_iso_txhdr && !csk->disable_iso && |
1949 | (remaining_data_tosend > tdata->max_xmit_dlength) && |
1950 | !(remaining_data_tosend % 4)) { |
1951 | u32 max_iso_data; |
1952 | |
1953 | if ((op == ISCSI_OP_SCSI_CMD) && |
1954 | session->initial_r2t_en) |
1955 | goto no_iso; |
1956 | |
1957 | max_pdu_size = tdata->max_xmit_dlength + |
1958 | ISCSI_PDU_NONPAYLOAD_LEN; |
1959 | max_iso_data = rounddown(CXGBI_MAX_ISO_DATA_IN_SKB, |
1960 | csk->advmss); |
1961 | max_num_pdu = max_iso_data / max_pdu_size; |
1962 | |
1963 | num_pdu = (remaining_data_tosend + |
1964 | tdata->max_xmit_dlength - 1) / |
1965 | tdata->max_xmit_dlength; |
1966 | |
1967 | if (num_pdu > max_num_pdu) |
1968 | num_pdu = max_num_pdu; |
1969 | |
1970 | conn->max_xmit_dlength = tdata->max_xmit_dlength * num_pdu; |
1971 | max_txdata_len = conn->max_xmit_dlength; |
1972 | iso_tx_rsvd = cdev->skb_iso_txhdr; |
1973 | local_iso_info = sizeof(struct cxgbi_iso_info); |
1974 | |
1975 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
1976 | "max_pdu_size %u, max_num_pdu %u, " |
1977 | "max_txdata %u, num_pdu %u\n" , |
1978 | max_pdu_size, max_num_pdu, |
1979 | max_txdata_len, num_pdu); |
1980 | } |
1981 | no_iso: |
1982 | count = min_t(u32, max_txdata_len, remaining_data_tosend); |
1983 | err = cxgbi_task_data_sgl_read(task, |
1984 | offset: tdata->offset + tdata->count, |
1985 | count, dlimit: &dlimit); |
1986 | if (unlikely(err < 0)) { |
1987 | log_debug(1 << CXGBI_DBG_ISCSI, |
1988 | "task 0x%p, tcp_task 0x%p, tdata 0x%p, " |
1989 | "sgl err %d, count %u, dlimit %u\n" , |
1990 | task, tcp_task, tdata, err, count, dlimit); |
1991 | if (dlimit) { |
1992 | remaining_data_tosend = |
1993 | rounddown(dlimit, |
1994 | tdata->max_xmit_dlength); |
1995 | if (!remaining_data_tosend) |
1996 | remaining_data_tosend = dlimit; |
1997 | |
1998 | dlimit = 0; |
1999 | |
2000 | conn->max_xmit_dlength = remaining_data_tosend; |
2001 | goto recalculate_sgl; |
2002 | } |
2003 | |
2004 | pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p, " |
2005 | "sgl err %d\n" , |
2006 | task, tcp_task, tdata, err); |
2007 | goto ret_err; |
2008 | } |
2009 | |
2010 | if ((tdata->flags & CXGBI_TASK_SGL_COPY) || |
2011 | (tdata->nr_frags > MAX_SKB_FRAGS)) |
2012 | headroom += conn->max_xmit_dlength; |
2013 | } |
2014 | |
2015 | tdata->skb = alloc_skb(size: local_iso_info + cdev->skb_tx_rsvd + |
2016 | iso_tx_rsvd + headroom, GFP_ATOMIC); |
2017 | if (!tdata->skb) { |
2018 | tdata->count = last_tdata_count; |
2019 | tdata->offset = last_tdata_offset; |
2020 | err = -ENOMEM; |
2021 | goto ret_err; |
2022 | } |
2023 | |
2024 | skb_reserve(skb: tdata->skb, len: local_iso_info + cdev->skb_tx_rsvd + |
2025 | iso_tx_rsvd); |
2026 | |
2027 | if (task->sc) { |
2028 | task->hdr = (struct iscsi_hdr *)tdata->skb->data; |
2029 | } else { |
2030 | task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC); |
2031 | if (!task->hdr) { |
2032 | __kfree_skb(skb: tdata->skb); |
2033 | tdata->skb = NULL; |
2034 | return -ENOMEM; |
2035 | } |
2036 | } |
2037 | |
2038 | task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; |
2039 | |
2040 | if (iso_tx_rsvd) |
2041 | cxgbi_skcb_set_flag(skb: tdata->skb, flag: SKCBF_TX_ISO); |
2042 | |
2043 | /* data_out uses scsi_cmd's itt */ |
2044 | if (op != ISCSI_OP_SCSI_DATA_OUT) |
2045 | task_reserve_itt(task, hdr_itt: &task->hdr->itt); |
2046 | |
2047 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
2048 | "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n" , |
2049 | task, op, tdata->skb, cdev->skb_tx_rsvd, headroom, |
2050 | conn->max_xmit_dlength, be32_to_cpu(task->hdr->itt)); |
2051 | |
2052 | return 0; |
2053 | |
2054 | ret_err: |
2055 | conn->max_xmit_dlength = tdata->max_xmit_dlength; |
2056 | return err; |
2057 | } |
2058 | EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); |
2059 | |
/*
 * cxgbi_prep_iso_info - populate the cxgbi_iso_info control block at skb->head
 * @task: iscsi task being transmitted
 * @skb: tx skb carrying a multi-pdu (ISO) payload burst
 * @count: total payload bytes carried by this skb
 *
 * For skbs flagged SKCBF_TX_ISO, fills in the ISO info the hardware uses
 * to slice the burst into individual pdus: pdu count and size, burst size,
 * segment offset and first/last-slice flags.  No-op for non-ISO skbs.
 * Always returns 0.
 */
static int
cxgbi_prep_iso_info(struct iscsi_task *task, struct sk_buff *skb,
		    u32 count)
{
	struct cxgbi_iso_info *iso_info = (struct cxgbi_iso_info *)skb->head;
	struct iscsi_r2t_info *r2t;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	u32 burst_size = 0, r2t_dlength = 0, dlength;
	u32 max_pdu_len = tdata->max_xmit_dlength;
	u32 segment_offset = 0;
	u32 num_pdu;

	if (unlikely(!cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
		return 0;

	memset(iso_info, 0, sizeof(struct cxgbi_iso_info));

	if (task->hdr->opcode == ISCSI_OP_SCSI_CMD && session->imm_data_en) {
		iso_info->flags |= CXGBI_ISO_INFO_IMM_ENABLE;
		burst_size = count;
	}

	/* clamp the template header's dlength to a single pdu's worth */
	dlength = ntoh24(task->hdr->dlength);
	dlength = min(dlength, max_pdu_len);
	hton24(task->hdr->dlength, dlength);

	/* ceiling division: pdus needed to carry the whole burst */
	num_pdu = (count + max_pdu_len - 1) / max_pdu_len;

	if (iscsi_task_has_unsol_data(task))
		r2t = &task->unsol_r2t;
	else
		r2t = tcp_task->r2t;

	if (r2t) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			  "count %u, tdata->count %u, num_pdu %u,"
			  "task->hdr_len %u, r2t->data_length %u, r2t->sent %u\n",
			  count, tdata->count, num_pdu, task->hdr_len,
			  r2t->data_length, r2t->sent);

		r2t_dlength = r2t->data_length - r2t->sent;
		segment_offset = r2t->sent;
		/* advance datasn past the extra pdus this burst generates */
		r2t->datasn += num_pdu - 1;
	}

	if (!r2t || !r2t->sent)
		iso_info->flags |= CXGBI_ISO_INFO_FSLICE;

	if (task->hdr->flags & ISCSI_FLAG_CMD_FINAL)
		iso_info->flags |= CXGBI_ISO_INFO_LSLICE;

	/* FINAL is cleared here; presumably the hw re-sets it on the last
	 * slice only — confirm against the ISO CPL documentation.
	 */
	task->hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;

	iso_info->op = task->hdr->opcode;
	iso_info->ahs = task->hdr->hlength;
	iso_info->num_pdu = num_pdu;
	iso_info->mpdu = max_pdu_len;
	iso_info->burst_size = (burst_size + r2t_dlength) >> 2; /* 4-byte units */
	iso_info->len = count + task->hdr_len;
	iso_info->segment_offset = segment_offset;

	cxgbi_skcb_tx_iscsi_hdrlen(skb) = task->hdr_len;
	return 0;
}
2127 | |
2128 | static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) |
2129 | { |
2130 | if (hcrc || dcrc) { |
2131 | u8 submode = 0; |
2132 | |
2133 | if (hcrc) |
2134 | submode |= 1; |
2135 | if (dcrc) |
2136 | submode |= 2; |
2137 | cxgbi_skcb_tx_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; |
2138 | } else |
2139 | cxgbi_skcb_tx_ulp_mode(skb) = 0; |
2140 | } |
2141 | |
/* Shared page attached as the pad-bytes fragment in cxgbi_conn_init_pdu()
 * (presumably zero-filled when allocated; the allocation is elsewhere in
 * this file — confirm).
 */
static struct page *rsvd_page;
2143 | |
2144 | int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, |
2145 | unsigned int count) |
2146 | { |
2147 | struct iscsi_conn *conn = task->conn; |
2148 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
2149 | struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); |
2150 | struct sk_buff *skb; |
2151 | struct scsi_cmnd *sc = task->sc; |
2152 | u32 expected_count, expected_offset; |
2153 | u32 datalen = count, dlimit = 0; |
2154 | u32 i, padlen = iscsi_padding(len: count); |
2155 | struct page *pg; |
2156 | int err; |
2157 | |
2158 | if (!tcp_task || (tcp_task->dd_data != tdata)) { |
2159 | pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n" , |
2160 | task, task->sc, tcp_task, |
2161 | tcp_task ? tcp_task->dd_data : NULL, tdata); |
2162 | return -EINVAL; |
2163 | } |
2164 | skb = tdata->skb; |
2165 | |
2166 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
2167 | "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n" , |
2168 | task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, |
2169 | be32_to_cpu(task->cmdsn), be32_to_cpu(task->hdr->itt), offset, count); |
2170 | |
2171 | skb_put(skb, len: task->hdr_len); |
2172 | tx_skb_setmode(skb, hcrc: conn->hdrdgst_en, dcrc: datalen ? conn->datadgst_en : 0); |
2173 | if (!count) { |
2174 | tdata->count = count; |
2175 | tdata->offset = offset; |
2176 | tdata->nr_frags = 0; |
2177 | tdata->total_offset = 0; |
2178 | tdata->total_count = 0; |
2179 | if (tdata->max_xmit_dlength) |
2180 | conn->max_xmit_dlength = tdata->max_xmit_dlength; |
2181 | cxgbi_skcb_clear_flag(skb, flag: SKCBF_TX_ISO); |
2182 | return 0; |
2183 | } |
2184 | |
2185 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
2186 | "data->total_count %u, tdata->total_offset %u\n" , |
2187 | tdata->total_count, tdata->total_offset); |
2188 | |
2189 | expected_count = tdata->total_count; |
2190 | expected_offset = tdata->total_offset; |
2191 | |
2192 | if ((count != expected_count) || |
2193 | (offset != expected_offset)) { |
2194 | err = cxgbi_task_data_sgl_read(task, offset, count, dlimit: &dlimit); |
2195 | if (err < 0) { |
2196 | pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p " |
2197 | "dlimit %u, sgl err %d.\n" , task, task->sc, |
2198 | tcp_task, tcp_task ? tcp_task->dd_data : NULL, |
2199 | tdata, dlimit, err); |
2200 | return err; |
2201 | } |
2202 | } |
2203 | |
2204 | /* Restore original value of conn->max_xmit_dlength because |
2205 | * it can get updated to ISO data size. |
2206 | */ |
2207 | conn->max_xmit_dlength = tdata->max_xmit_dlength; |
2208 | |
2209 | if (sc) { |
2210 | struct page_frag *frag = tdata->frags; |
2211 | |
2212 | if ((tdata->flags & CXGBI_TASK_SGL_COPY) || |
2213 | (tdata->nr_frags > MAX_SKB_FRAGS) || |
2214 | (padlen && (tdata->nr_frags == |
2215 | MAX_SKB_FRAGS))) { |
2216 | char *dst = skb->data + task->hdr_len; |
2217 | |
2218 | /* data fits in the skb's headroom */ |
2219 | for (i = 0; i < tdata->nr_frags; i++, frag++) { |
2220 | char *src = kmap_atomic(page: frag->page); |
2221 | |
2222 | memcpy(dst, src + frag->offset, frag->size); |
2223 | dst += frag->size; |
2224 | kunmap_atomic(src); |
2225 | } |
2226 | |
2227 | if (padlen) { |
2228 | memset(dst, 0, padlen); |
2229 | padlen = 0; |
2230 | } |
2231 | skb_put(skb, len: count + padlen); |
2232 | } else { |
2233 | for (i = 0; i < tdata->nr_frags; i++, frag++) { |
2234 | get_page(page: frag->page); |
2235 | skb_fill_page_desc(skb, i, page: frag->page, |
2236 | off: frag->offset, size: frag->size); |
2237 | } |
2238 | |
2239 | skb->len += count; |
2240 | skb->data_len += count; |
2241 | skb->truesize += count; |
2242 | } |
2243 | } else { |
2244 | pg = virt_to_head_page(x: task->data); |
2245 | get_page(page: pg); |
2246 | skb_fill_page_desc(skb, i: 0, page: pg, |
2247 | off: task->data - (char *)page_address(pg), |
2248 | size: count); |
2249 | skb->len += count; |
2250 | skb->data_len += count; |
2251 | skb->truesize += count; |
2252 | } |
2253 | |
2254 | if (padlen) { |
2255 | get_page(page: rsvd_page); |
2256 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
2257 | page: rsvd_page, off: 0, size: padlen); |
2258 | |
2259 | skb->data_len += padlen; |
2260 | skb->truesize += padlen; |
2261 | skb->len += padlen; |
2262 | } |
2263 | |
2264 | if (likely(count > tdata->max_xmit_dlength)) |
2265 | cxgbi_prep_iso_info(task, skb, count); |
2266 | else |
2267 | cxgbi_skcb_clear_flag(skb, flag: SKCBF_TX_ISO); |
2268 | |
2269 | return 0; |
2270 | } |
2271 | EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu); |
2272 | |
2273 | static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb) |
2274 | { |
2275 | struct cxgbi_device *cdev = csk->cdev; |
2276 | struct cxgbi_iso_info *iso_cpl; |
2277 | u32 frags = skb_shinfo(skb)->nr_frags; |
2278 | u32 , num_pdu, hdr_len; |
2279 | u32 iso_tx_rsvd = 0; |
2280 | |
2281 | if (csk->state != CTP_ESTABLISHED) { |
2282 | log_debug(1 << CXGBI_DBG_PDU_TX, |
2283 | "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n" , |
2284 | csk, csk->state, csk->flags, csk->tid); |
2285 | return -EPIPE; |
2286 | } |
2287 | |
2288 | if (csk->err) { |
2289 | log_debug(1 << CXGBI_DBG_PDU_TX, |
2290 | "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n" , |
2291 | csk, csk->state, csk->flags, csk->tid, csk->err); |
2292 | return -EPIPE; |
2293 | } |
2294 | |
2295 | if ((cdev->flags & CXGBI_FLAG_DEV_T3) && |
2296 | before(seq1: (csk->snd_win + csk->snd_una), seq2: csk->write_seq)) { |
2297 | log_debug(1 << CXGBI_DBG_PDU_TX, |
2298 | "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n" , |
2299 | csk, csk->state, csk->flags, csk->tid, csk->write_seq, |
2300 | csk->snd_una, csk->snd_win); |
2301 | return -ENOBUFS; |
2302 | } |
2303 | |
2304 | if (cxgbi_skcb_test_flag(skb, flag: SKCBF_TX_ISO)) |
2305 | iso_tx_rsvd = cdev->skb_iso_txhdr; |
2306 | |
2307 | if (unlikely(skb_headroom(skb) < (cdev->skb_tx_rsvd + iso_tx_rsvd))) { |
2308 | pr_err("csk 0x%p, skb head %u < %u.\n" , |
2309 | csk, skb_headroom(skb), cdev->skb_tx_rsvd); |
2310 | return -EINVAL; |
2311 | } |
2312 | |
2313 | if (skb->len != skb->data_len) |
2314 | frags++; |
2315 | |
2316 | if (frags >= SKB_WR_LIST_SIZE) { |
2317 | pr_err("csk 0x%p, frags %u, %u,%u >%u.\n" , |
2318 | csk, skb_shinfo(skb)->nr_frags, skb->len, |
2319 | skb->data_len, (unsigned int)SKB_WR_LIST_SIZE); |
2320 | return -EINVAL; |
2321 | } |
2322 | |
2323 | cxgbi_skcb_set_flag(skb, flag: SKCBF_TX_NEED_HDR); |
2324 | skb_reset_transport_header(skb); |
2325 | cxgbi_sock_skb_entail(csk, skb); |
2326 | |
2327 | extra_len = cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)); |
2328 | |
2329 | if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) { |
2330 | iso_cpl = (struct cxgbi_iso_info *)skb->head; |
2331 | num_pdu = iso_cpl->num_pdu; |
2332 | hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb); |
2333 | extra_len = (cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)) * |
2334 | num_pdu) + (hdr_len * (num_pdu - 1)); |
2335 | } |
2336 | |
2337 | csk->write_seq += (skb->len + extra_len); |
2338 | |
2339 | return 0; |
2340 | } |
2341 | |
2342 | static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb) |
2343 | { |
2344 | struct cxgbi_device *cdev = csk->cdev; |
2345 | int len = skb->len; |
2346 | int err; |
2347 | |
2348 | spin_lock_bh(lock: &csk->lock); |
2349 | err = cxgbi_sock_tx_queue_up(csk, skb); |
2350 | if (err < 0) { |
2351 | spin_unlock_bh(lock: &csk->lock); |
2352 | return err; |
2353 | } |
2354 | |
2355 | if (likely(skb_queue_len(&csk->write_queue))) |
2356 | cdev->csk_push_tx_frames(csk, 0); |
2357 | spin_unlock_bh(lock: &csk->lock); |
2358 | return len; |
2359 | } |
2360 | |
2361 | int cxgbi_conn_xmit_pdu(struct iscsi_task *task) |
2362 | { |
2363 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; |
2364 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
2365 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
2366 | struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); |
2367 | struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; |
2368 | struct sk_buff *skb; |
2369 | struct cxgbi_sock *csk = NULL; |
2370 | u32 pdulen = 0; |
2371 | u32 datalen; |
2372 | int err; |
2373 | |
2374 | if (!tcp_task || (tcp_task->dd_data != tdata)) { |
2375 | pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n" , |
2376 | task, task->sc, tcp_task, |
2377 | tcp_task ? tcp_task->dd_data : NULL, tdata); |
2378 | return -EINVAL; |
2379 | } |
2380 | |
2381 | skb = tdata->skb; |
2382 | if (!skb) { |
2383 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
2384 | "task 0x%p, skb NULL.\n" , task); |
2385 | return 0; |
2386 | } |
2387 | |
2388 | if (cconn && cconn->cep) |
2389 | csk = cconn->cep->csk; |
2390 | |
2391 | if (!csk) { |
2392 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
2393 | "task 0x%p, csk gone.\n" , task); |
2394 | return -EPIPE; |
2395 | } |
2396 | |
2397 | tdata->skb = NULL; |
2398 | datalen = skb->data_len; |
2399 | |
2400 | /* write ppod first if using ofldq to write ppod */ |
2401 | if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { |
2402 | struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev); |
2403 | |
2404 | ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID; |
2405 | if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0) |
2406 | pr_err("task 0x%p, ppod writing using ofldq failed.\n" , |
2407 | task); |
2408 | /* continue. Let fl get the data */ |
2409 | } |
2410 | |
2411 | if (!task->sc) |
2412 | memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX); |
2413 | |
2414 | err = cxgbi_sock_send_skb(csk, skb); |
2415 | if (err > 0) { |
2416 | pdulen += err; |
2417 | |
2418 | log_debug(1 << CXGBI_DBG_PDU_TX, "task 0x%p,0x%p, rv %d.\n" , |
2419 | task, task->sc, err); |
2420 | |
2421 | if (task->conn->hdrdgst_en) |
2422 | pdulen += ISCSI_DIGEST_SIZE; |
2423 | |
2424 | if (datalen && task->conn->datadgst_en) |
2425 | pdulen += ISCSI_DIGEST_SIZE; |
2426 | |
2427 | task->conn->txdata_octets += pdulen; |
2428 | |
2429 | if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) { |
2430 | if (time_after(jiffies, csk->prev_iso_ts + HZ)) { |
2431 | csk->disable_iso = false; |
2432 | csk->prev_iso_ts = 0; |
2433 | log_debug(1 << CXGBI_DBG_PDU_TX, |
2434 | "enable iso: csk 0x%p\n" , csk); |
2435 | } |
2436 | } |
2437 | |
2438 | return 0; |
2439 | } |
2440 | |
2441 | if (err == -EAGAIN || err == -ENOBUFS) { |
2442 | log_debug(1 << CXGBI_DBG_PDU_TX, |
2443 | "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n" , |
2444 | task, skb, skb->len, skb->data_len, err); |
2445 | /* reset skb to send when we are called again */ |
2446 | tdata->skb = skb; |
2447 | |
2448 | if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) && |
2449 | (csk->no_tx_credits++ >= 2)) { |
2450 | csk->disable_iso = true; |
2451 | csk->prev_iso_ts = jiffies; |
2452 | log_debug(1 << CXGBI_DBG_PDU_TX, |
2453 | "disable iso:csk 0x%p, ts:%lu\n" , |
2454 | csk, csk->prev_iso_ts); |
2455 | } |
2456 | |
2457 | return err; |
2458 | } |
2459 | |
2460 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
2461 | "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n" , |
2462 | task->itt, skb, skb->len, skb->data_len, err); |
2463 | __kfree_skb(skb); |
2464 | iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n" , err); |
2465 | iscsi_conn_failure(conn: task->conn, err: ISCSI_ERR_XMIT_FAILED); |
2466 | return err; |
2467 | } |
2468 | EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); |
2469 | |
2470 | void cxgbi_cleanup_task(struct iscsi_task *task) |
2471 | { |
2472 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
2473 | struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); |
2474 | |
2475 | if (!tcp_task || (tcp_task->dd_data != tdata)) { |
2476 | pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n" , |
2477 | task, task->sc, tcp_task, |
2478 | tcp_task ? tcp_task->dd_data : NULL, tdata); |
2479 | return; |
2480 | } |
2481 | |
2482 | log_debug(1 << CXGBI_DBG_ISCSI, |
2483 | "task 0x%p, skb 0x%p, itt 0x%x.\n" , |
2484 | task, tdata->skb, task->hdr_itt); |
2485 | |
2486 | tcp_task->dd_data = NULL; |
2487 | |
2488 | if (!task->sc) |
2489 | kfree(objp: task->hdr); |
2490 | task->hdr = NULL; |
2491 | |
2492 | /* never reached the xmit task callout */ |
2493 | if (tdata->skb) { |
2494 | __kfree_skb(skb: tdata->skb); |
2495 | tdata->skb = NULL; |
2496 | } |
2497 | |
2498 | task_release_itt(task, hdr_itt: task->hdr_itt); |
2499 | memset(tdata, 0, sizeof(*tdata)); |
2500 | |
2501 | iscsi_tcp_cleanup_task(task); |
2502 | } |
2503 | EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); |
2504 | |
2505 | void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, |
2506 | struct iscsi_stats *stats) |
2507 | { |
2508 | struct iscsi_conn *conn = cls_conn->dd_data; |
2509 | |
2510 | stats->txdata_octets = conn->txdata_octets; |
2511 | stats->rxdata_octets = conn->rxdata_octets; |
2512 | stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; |
2513 | stats->dataout_pdus = conn->dataout_pdus_cnt; |
2514 | stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; |
2515 | stats->datain_pdus = conn->datain_pdus_cnt; |
2516 | stats->r2t_pdus = conn->r2t_pdus_cnt; |
2517 | stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; |
2518 | stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; |
2519 | stats->digest_err = 0; |
2520 | stats->timeout_err = 0; |
2521 | stats->custom_length = 1; |
2522 | strcpy(p: stats->custom[0].desc, q: "eh_abort_cnt" ); |
2523 | stats->custom[0].value = conn->eh_abort_cnt; |
2524 | } |
2525 | EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); |
2526 | |
2527 | static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) |
2528 | { |
2529 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
2530 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
2531 | struct cxgbi_device *cdev = cconn->chba->cdev; |
2532 | unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); |
2533 | unsigned int max_def = 512 * MAX_SKB_FRAGS; |
2534 | unsigned int max = max(max_def, headroom); |
2535 | |
2536 | max = min(cconn->chba->cdev->tx_max_size, max); |
2537 | if (conn->max_xmit_dlength) |
2538 | conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); |
2539 | else |
2540 | conn->max_xmit_dlength = max; |
2541 | cxgbi_align_pdu_size(conn->max_xmit_dlength); |
2542 | |
2543 | return 0; |
2544 | } |
2545 | |
2546 | static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) |
2547 | { |
2548 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
2549 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
2550 | unsigned int max = cconn->chba->cdev->rx_max_size; |
2551 | |
2552 | cxgbi_align_pdu_size(max); |
2553 | |
2554 | if (conn->max_recv_dlength) { |
2555 | if (conn->max_recv_dlength > max) { |
2556 | pr_err("MaxRecvDataSegmentLength %u > %u.\n" , |
2557 | conn->max_recv_dlength, max); |
2558 | return -EINVAL; |
2559 | } |
2560 | conn->max_recv_dlength = min(conn->max_recv_dlength, max); |
2561 | cxgbi_align_pdu_size(conn->max_recv_dlength); |
2562 | } else |
2563 | conn->max_recv_dlength = max; |
2564 | |
2565 | return 0; |
2566 | } |
2567 | |
2568 | int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, |
2569 | enum iscsi_param param, char *buf, int buflen) |
2570 | { |
2571 | struct iscsi_conn *conn = cls_conn->dd_data; |
2572 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
2573 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
2574 | struct cxgbi_sock *csk = cconn->cep->csk; |
2575 | int err; |
2576 | |
2577 | log_debug(1 << CXGBI_DBG_ISCSI, |
2578 | "cls_conn 0x%p, param %d, buf(%d) %s.\n" , |
2579 | cls_conn, param, buflen, buf); |
2580 | |
2581 | switch (param) { |
2582 | case ISCSI_PARAM_HDRDGST_EN: |
2583 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2584 | if (!err && conn->hdrdgst_en) |
2585 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
2586 | conn->hdrdgst_en, |
2587 | conn->datadgst_en); |
2588 | break; |
2589 | case ISCSI_PARAM_DATADGST_EN: |
2590 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2591 | if (!err && conn->datadgst_en) |
2592 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
2593 | conn->hdrdgst_en, |
2594 | conn->datadgst_en); |
2595 | break; |
2596 | case ISCSI_PARAM_MAX_R2T: |
2597 | return iscsi_tcp_set_max_r2t(conn, buf); |
2598 | case ISCSI_PARAM_MAX_RECV_DLENGTH: |
2599 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2600 | if (!err) |
2601 | err = cxgbi_conn_max_recv_dlength(conn); |
2602 | break; |
2603 | case ISCSI_PARAM_MAX_XMIT_DLENGTH: |
2604 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2605 | if (!err) |
2606 | err = cxgbi_conn_max_xmit_dlength(conn); |
2607 | break; |
2608 | default: |
2609 | return iscsi_set_param(cls_conn, param, buf, buflen); |
2610 | } |
2611 | return err; |
2612 | } |
2613 | EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); |
2614 | |
2615 | int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, |
2616 | char *buf) |
2617 | { |
2618 | struct cxgbi_endpoint *cep = ep->dd_data; |
2619 | struct cxgbi_sock *csk; |
2620 | |
2621 | log_debug(1 << CXGBI_DBG_ISCSI, |
2622 | "cls_conn 0x%p, param %d.\n" , ep, param); |
2623 | |
2624 | switch (param) { |
2625 | case ISCSI_PARAM_CONN_PORT: |
2626 | case ISCSI_PARAM_CONN_ADDRESS: |
2627 | if (!cep) |
2628 | return -ENOTCONN; |
2629 | |
2630 | csk = cep->csk; |
2631 | if (!csk) |
2632 | return -ENOTCONN; |
2633 | |
2634 | return iscsi_conn_get_addr_param(addr: (struct sockaddr_storage *) |
2635 | &csk->daddr, param, buf); |
2636 | default: |
2637 | break; |
2638 | } |
2639 | return -ENOSYS; |
2640 | } |
2641 | EXPORT_SYMBOL_GPL(cxgbi_get_ep_param); |
2642 | |
2643 | struct iscsi_cls_conn * |
2644 | cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) |
2645 | { |
2646 | struct iscsi_cls_conn *cls_conn; |
2647 | struct iscsi_conn *conn; |
2648 | struct iscsi_tcp_conn *tcp_conn; |
2649 | struct cxgbi_conn *cconn; |
2650 | |
2651 | cls_conn = iscsi_tcp_conn_setup(cls_session, dd_data_size: sizeof(*cconn), conn_idx: cid); |
2652 | if (!cls_conn) |
2653 | return NULL; |
2654 | |
2655 | conn = cls_conn->dd_data; |
2656 | tcp_conn = conn->dd_data; |
2657 | cconn = tcp_conn->dd_data; |
2658 | cconn->iconn = conn; |
2659 | |
2660 | log_debug(1 << CXGBI_DBG_ISCSI, |
2661 | "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n" , |
2662 | cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); |
2663 | |
2664 | return cls_conn; |
2665 | } |
2666 | EXPORT_SYMBOL_GPL(cxgbi_create_conn); |
2667 | |
2668 | int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, |
2669 | struct iscsi_cls_conn *cls_conn, |
2670 | u64 transport_eph, int is_leading) |
2671 | { |
2672 | struct iscsi_conn *conn = cls_conn->dd_data; |
2673 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
2674 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
2675 | struct cxgbi_ppm *ppm; |
2676 | struct iscsi_endpoint *ep; |
2677 | struct cxgbi_endpoint *cep; |
2678 | struct cxgbi_sock *csk; |
2679 | int err; |
2680 | |
2681 | ep = iscsi_lookup_endpoint(handle: transport_eph); |
2682 | if (!ep) |
2683 | return -EINVAL; |
2684 | |
2685 | /* setup ddp pagesize */ |
2686 | cep = ep->dd_data; |
2687 | csk = cep->csk; |
2688 | |
2689 | ppm = csk->cdev->cdev2ppm(csk->cdev); |
2690 | err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, |
2691 | ppm->tformat.pgsz_idx_dflt); |
2692 | if (err < 0) |
2693 | goto put_ep; |
2694 | |
2695 | err = iscsi_conn_bind(cls_session, cls_conn, is_leading); |
2696 | if (err) { |
2697 | err = -EINVAL; |
2698 | goto put_ep; |
2699 | } |
2700 | |
2701 | /* calculate the tag idx bits needed for this conn based on cmds_max */ |
2702 | cconn->task_idx_bits = (__ilog2_u32(n: conn->session->cmds_max - 1)) + 1; |
2703 | |
2704 | write_lock_bh(&csk->callback_lock); |
2705 | csk->user_data = conn; |
2706 | cconn->chba = cep->chba; |
2707 | cconn->cep = cep; |
2708 | cep->cconn = cconn; |
2709 | write_unlock_bh(&csk->callback_lock); |
2710 | |
2711 | cxgbi_conn_max_xmit_dlength(conn); |
2712 | cxgbi_conn_max_recv_dlength(conn); |
2713 | |
2714 | log_debug(1 << CXGBI_DBG_ISCSI, |
2715 | "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n" , |
2716 | cls_session, cls_conn, ep, cconn, csk); |
2717 | /* init recv engine */ |
2718 | iscsi_tcp_hdr_recv_prep(tcp_conn); |
2719 | |
2720 | put_ep: |
2721 | iscsi_put_endpoint(ep); |
2722 | return err; |
2723 | } |
2724 | EXPORT_SYMBOL_GPL(cxgbi_bind_conn); |
2725 | |
2726 | struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, |
2727 | u16 cmds_max, u16 qdepth, |
2728 | u32 initial_cmdsn) |
2729 | { |
2730 | struct cxgbi_endpoint *cep; |
2731 | struct cxgbi_hba *chba; |
2732 | struct Scsi_Host *shost; |
2733 | struct iscsi_cls_session *cls_session; |
2734 | struct iscsi_session *session; |
2735 | |
2736 | if (!ep) { |
2737 | pr_err("missing endpoint.\n" ); |
2738 | return NULL; |
2739 | } |
2740 | |
2741 | cep = ep->dd_data; |
2742 | chba = cep->chba; |
2743 | shost = chba->shost; |
2744 | |
2745 | BUG_ON(chba != iscsi_host_priv(shost)); |
2746 | |
2747 | cls_session = iscsi_session_setup(chba->cdev->itp, shost, |
2748 | cmds_max, 0, |
2749 | sizeof(struct iscsi_tcp_task) + |
2750 | sizeof(struct cxgbi_task_data), |
2751 | initial_cmdsn, ISCSI_MAX_TARGET); |
2752 | if (!cls_session) |
2753 | return NULL; |
2754 | |
2755 | session = cls_session->dd_data; |
2756 | if (iscsi_tcp_r2tpool_alloc(session)) |
2757 | goto remove_session; |
2758 | |
2759 | log_debug(1 << CXGBI_DBG_ISCSI, |
2760 | "ep 0x%p, cls sess 0x%p.\n" , ep, cls_session); |
2761 | return cls_session; |
2762 | |
2763 | remove_session: |
2764 | iscsi_session_teardown(cls_session); |
2765 | return NULL; |
2766 | } |
2767 | EXPORT_SYMBOL_GPL(cxgbi_create_session); |
2768 | |
2769 | void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) |
2770 | { |
2771 | log_debug(1 << CXGBI_DBG_ISCSI, |
2772 | "cls sess 0x%p.\n" , cls_session); |
2773 | |
2774 | iscsi_tcp_r2tpool_free(session: cls_session->dd_data); |
2775 | iscsi_session_teardown(cls_session); |
2776 | } |
2777 | EXPORT_SYMBOL_GPL(cxgbi_destroy_session); |
2778 | |
2779 | int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, |
2780 | char *buf, int buflen) |
2781 | { |
2782 | struct cxgbi_hba *chba = iscsi_host_priv(shost); |
2783 | |
2784 | if (!chba->ndev) { |
2785 | shost_printk(KERN_ERR, shost, "Could not get host param. " |
2786 | "netdev for host not set.\n" ); |
2787 | return -ENODEV; |
2788 | } |
2789 | |
2790 | log_debug(1 << CXGBI_DBG_ISCSI, |
2791 | "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n" , |
2792 | shost, chba, chba->ndev->name, param, buflen, buf); |
2793 | |
2794 | switch (param) { |
2795 | case ISCSI_HOST_PARAM_IPADDRESS: |
2796 | { |
2797 | __be32 addr = in_aton(str: buf); |
2798 | log_debug(1 << CXGBI_DBG_ISCSI, |
2799 | "hba %s, req. ipv4 %pI4.\n" , chba->ndev->name, &addr); |
2800 | cxgbi_set_iscsi_ipv4(chba, ipaddr: addr); |
2801 | return 0; |
2802 | } |
2803 | case ISCSI_HOST_PARAM_HWADDRESS: |
2804 | case ISCSI_HOST_PARAM_NETDEV_NAME: |
2805 | return 0; |
2806 | default: |
2807 | return iscsi_host_set_param(shost, param, buf, buflen); |
2808 | } |
2809 | } |
2810 | EXPORT_SYMBOL_GPL(cxgbi_set_host_param); |
2811 | |
2812 | int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, |
2813 | char *buf) |
2814 | { |
2815 | struct cxgbi_hba *chba = iscsi_host_priv(shost); |
2816 | int len = 0; |
2817 | |
2818 | if (!chba->ndev) { |
2819 | shost_printk(KERN_ERR, shost, "Could not get host param. " |
2820 | "netdev for host not set.\n" ); |
2821 | return -ENODEV; |
2822 | } |
2823 | |
2824 | log_debug(1 << CXGBI_DBG_ISCSI, |
2825 | "shost 0x%p, hba 0x%p,%s, param %d.\n" , |
2826 | shost, chba, chba->ndev->name, param); |
2827 | |
2828 | switch (param) { |
2829 | case ISCSI_HOST_PARAM_HWADDRESS: |
2830 | len = sysfs_format_mac(buf, addr: chba->ndev->dev_addr, len: 6); |
2831 | break; |
2832 | case ISCSI_HOST_PARAM_NETDEV_NAME: |
2833 | len = sprintf(buf, fmt: "%s\n" , chba->ndev->name); |
2834 | break; |
2835 | case ISCSI_HOST_PARAM_IPADDRESS: |
2836 | { |
2837 | struct cxgbi_sock *csk = find_sock_on_port(cdev: chba->cdev, |
2838 | port_id: chba->port_id); |
2839 | if (csk) { |
2840 | len = sprintf(buf, fmt: "%pIS" , |
2841 | (struct sockaddr *)&csk->saddr); |
2842 | } |
2843 | log_debug(1 << CXGBI_DBG_ISCSI, |
2844 | "hba %s, addr %s.\n" , chba->ndev->name, buf); |
2845 | break; |
2846 | } |
2847 | default: |
2848 | return iscsi_host_get_param(shost, param, buf); |
2849 | } |
2850 | |
2851 | return len; |
2852 | } |
2853 | EXPORT_SYMBOL_GPL(cxgbi_get_host_param); |
2854 | |
2855 | struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, |
2856 | struct sockaddr *dst_addr, |
2857 | int non_blocking) |
2858 | { |
2859 | struct iscsi_endpoint *ep; |
2860 | struct cxgbi_endpoint *cep; |
2861 | struct cxgbi_hba *hba = NULL; |
2862 | struct cxgbi_sock *csk; |
2863 | int ifindex = 0; |
2864 | int err = -EINVAL; |
2865 | |
2866 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, |
2867 | "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n" , |
2868 | shost, non_blocking, dst_addr); |
2869 | |
2870 | if (shost) { |
2871 | hba = iscsi_host_priv(shost); |
2872 | if (!hba) { |
2873 | pr_info("shost 0x%p, priv NULL.\n" , shost); |
2874 | goto err_out; |
2875 | } |
2876 | } |
2877 | |
2878 | check_route: |
2879 | if (dst_addr->sa_family == AF_INET) { |
2880 | csk = cxgbi_check_route(dst_addr, ifindex); |
2881 | #if IS_ENABLED(CONFIG_IPV6) |
2882 | } else if (dst_addr->sa_family == AF_INET6) { |
2883 | csk = cxgbi_check_route6(dst_addr, ifindex); |
2884 | #endif |
2885 | } else { |
2886 | pr_info("address family 0x%x NOT supported.\n" , |
2887 | dst_addr->sa_family); |
2888 | err = -EAFNOSUPPORT; |
2889 | return (struct iscsi_endpoint *)ERR_PTR(error: err); |
2890 | } |
2891 | |
2892 | if (IS_ERR(ptr: csk)) |
2893 | return (struct iscsi_endpoint *)csk; |
2894 | cxgbi_sock_get(csk); |
2895 | |
2896 | if (!hba) |
2897 | hba = csk->cdev->hbas[csk->port_id]; |
2898 | else if (hba != csk->cdev->hbas[csk->port_id]) { |
2899 | if (ifindex != hba->ndev->ifindex) { |
2900 | cxgbi_sock_put(csk); |
2901 | cxgbi_sock_closed(csk); |
2902 | ifindex = hba->ndev->ifindex; |
2903 | goto check_route; |
2904 | } |
2905 | |
2906 | pr_info("Could not connect through requested host %u" |
2907 | "hba 0x%p != 0x%p (%u).\n" , |
2908 | shost->host_no, hba, |
2909 | csk->cdev->hbas[csk->port_id], csk->port_id); |
2910 | err = -ENOSPC; |
2911 | goto release_conn; |
2912 | } |
2913 | |
2914 | err = sock_get_port(csk); |
2915 | if (err) |
2916 | goto release_conn; |
2917 | |
2918 | cxgbi_sock_set_state(csk, state: CTP_CONNECTING); |
2919 | err = csk->cdev->csk_init_act_open(csk); |
2920 | if (err) |
2921 | goto release_conn; |
2922 | |
2923 | if (cxgbi_sock_is_closing(csk)) { |
2924 | err = -ENOSPC; |
2925 | pr_info("csk 0x%p is closing.\n" , csk); |
2926 | goto release_conn; |
2927 | } |
2928 | |
2929 | ep = iscsi_create_endpoint(dd_size: sizeof(*cep)); |
2930 | if (!ep) { |
2931 | err = -ENOMEM; |
2932 | pr_info("iscsi alloc ep, OOM.\n" ); |
2933 | goto release_conn; |
2934 | } |
2935 | |
2936 | cep = ep->dd_data; |
2937 | cep->csk = csk; |
2938 | cep->chba = hba; |
2939 | |
2940 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, |
2941 | "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n" , |
2942 | ep, cep, csk, hba, hba->ndev->name); |
2943 | return ep; |
2944 | |
2945 | release_conn: |
2946 | cxgbi_sock_put(csk); |
2947 | cxgbi_sock_closed(csk); |
2948 | err_out: |
2949 | return ERR_PTR(error: err); |
2950 | } |
2951 | EXPORT_SYMBOL_GPL(cxgbi_ep_connect); |
2952 | |
2953 | int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) |
2954 | { |
2955 | struct cxgbi_endpoint *cep = ep->dd_data; |
2956 | struct cxgbi_sock *csk = cep->csk; |
2957 | |
2958 | if (!cxgbi_sock_is_established(csk)) |
2959 | return 0; |
2960 | return 1; |
2961 | } |
2962 | EXPORT_SYMBOL_GPL(cxgbi_ep_poll); |
2963 | |
/*
 * cxgbi_ep_disconnect - iscsi_transport .ep_disconnect handler
 *
 * Detaches the endpoint from its connection, destroys the endpoint,
 * tears down the offloaded socket, and drops the reference taken at
 * connect time.  Ordering matters: the callback_lock window must clear
 * user_data/cep before the endpoint is destroyed so that late socket
 * callbacks cannot touch a freed connection.
 */
void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		ep, cep, cconn, csk, csk->state, csk->flags);

	/* Only bound endpoints have a connection to unlink from. */
	if (cconn && cconn->iconn) {
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	/*
	 * An established (or later) connection needs an orderly active
	 * close; anything earlier can be torn down immediately.
	 */
	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	/* Drop the reference taken in cxgbi_ep_connect(). */
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
2990 | |
2991 | int cxgbi_iscsi_init(struct iscsi_transport *itp, |
2992 | struct scsi_transport_template **stt) |
2993 | { |
2994 | *stt = iscsi_register_transport(tt: itp); |
2995 | if (*stt == NULL) { |
2996 | pr_err("unable to register %s transport 0x%p.\n" , |
2997 | itp->name, itp); |
2998 | return -ENODEV; |
2999 | } |
3000 | log_debug(1 << CXGBI_DBG_ISCSI, |
3001 | "%s, registered iscsi transport 0x%p.\n" , |
3002 | itp->name, stt); |
3003 | return 0; |
3004 | } |
3005 | EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); |
3006 | |
3007 | void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, |
3008 | struct scsi_transport_template **stt) |
3009 | { |
3010 | if (*stt) { |
3011 | log_debug(1 << CXGBI_DBG_ISCSI, |
3012 | "de-register transport 0x%p, %s, stt 0x%p.\n" , |
3013 | itp, itp->name, *stt); |
3014 | *stt = NULL; |
3015 | iscsi_unregister_transport(tt: itp); |
3016 | } |
3017 | } |
3018 | EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); |
3019 | |
3020 | umode_t cxgbi_attr_is_visible(int param_type, int param) |
3021 | { |
3022 | switch (param_type) { |
3023 | case ISCSI_HOST_PARAM: |
3024 | switch (param) { |
3025 | case ISCSI_HOST_PARAM_NETDEV_NAME: |
3026 | case ISCSI_HOST_PARAM_HWADDRESS: |
3027 | case ISCSI_HOST_PARAM_IPADDRESS: |
3028 | case ISCSI_HOST_PARAM_INITIATOR_NAME: |
3029 | return S_IRUGO; |
3030 | default: |
3031 | return 0; |
3032 | } |
3033 | case ISCSI_PARAM: |
3034 | switch (param) { |
3035 | case ISCSI_PARAM_MAX_RECV_DLENGTH: |
3036 | case ISCSI_PARAM_MAX_XMIT_DLENGTH: |
3037 | case ISCSI_PARAM_HDRDGST_EN: |
3038 | case ISCSI_PARAM_DATADGST_EN: |
3039 | case ISCSI_PARAM_CONN_ADDRESS: |
3040 | case ISCSI_PARAM_CONN_PORT: |
3041 | case ISCSI_PARAM_EXP_STATSN: |
3042 | case ISCSI_PARAM_PERSISTENT_ADDRESS: |
3043 | case ISCSI_PARAM_PERSISTENT_PORT: |
3044 | case ISCSI_PARAM_PING_TMO: |
3045 | case ISCSI_PARAM_RECV_TMO: |
3046 | case ISCSI_PARAM_INITIAL_R2T_EN: |
3047 | case ISCSI_PARAM_MAX_R2T: |
3048 | case ISCSI_PARAM_IMM_DATA_EN: |
3049 | case ISCSI_PARAM_FIRST_BURST: |
3050 | case ISCSI_PARAM_MAX_BURST: |
3051 | case ISCSI_PARAM_PDU_INORDER_EN: |
3052 | case ISCSI_PARAM_DATASEQ_INORDER_EN: |
3053 | case ISCSI_PARAM_ERL: |
3054 | case ISCSI_PARAM_TARGET_NAME: |
3055 | case ISCSI_PARAM_TPGT: |
3056 | case ISCSI_PARAM_USERNAME: |
3057 | case ISCSI_PARAM_PASSWORD: |
3058 | case ISCSI_PARAM_USERNAME_IN: |
3059 | case ISCSI_PARAM_PASSWORD_IN: |
3060 | case ISCSI_PARAM_FAST_ABORT: |
3061 | case ISCSI_PARAM_ABORT_TMO: |
3062 | case ISCSI_PARAM_LU_RESET_TMO: |
3063 | case ISCSI_PARAM_TGT_RESET_TMO: |
3064 | case ISCSI_PARAM_IFACE_NAME: |
3065 | case ISCSI_PARAM_INITIATOR_NAME: |
3066 | return S_IRUGO; |
3067 | default: |
3068 | return 0; |
3069 | } |
3070 | } |
3071 | |
3072 | return 0; |
3073 | } |
3074 | EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); |
3075 | |
3076 | static int __init libcxgbi_init_module(void) |
3077 | { |
3078 | pr_info("%s" , version); |
3079 | |
3080 | BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < |
3081 | sizeof(struct cxgbi_skb_cb)); |
3082 | rsvd_page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
3083 | if (!rsvd_page) |
3084 | return -ENOMEM; |
3085 | |
3086 | return 0; |
3087 | } |
3088 | |
3089 | static void __exit libcxgbi_exit_module(void) |
3090 | { |
3091 | cxgbi_device_unregister_all(0xFF); |
3092 | put_page(page: rsvd_page); |
3093 | return; |
3094 | } |
3095 | |
3096 | module_init(libcxgbi_init_module); |
3097 | module_exit(libcxgbi_exit_module); |
3098 | |