/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static void cxgb_neigh_update(struct neighbour *neigh);
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
			  struct neighbour *neigh, const void *daddr);

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
/**
 * cxgb3_register_client - register an offload client
 * @client: the client
 *
 * Add the client to the client list and call back the client for
 * each activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);
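
/*
 * Example usage (a sketch, not part of this driver): an upper-layer
 * module such as an iSCSI or RDMA driver declares a struct cxgb3_client
 * with its callbacks and registers it from module init.  The names
 * my_add, my_remove and my_client below are hypothetical.
 *
 *	static void my_add(struct t3cdev *tdev)
 *	{
 *		// set up per-device state; tdev is already activated
 *	}
 *
 *	static void my_remove(struct t3cdev *tdev)
 *	{
 *		// tear down per-device state
 *	}
 *
 *	static struct cxgb3_client my_client = {
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 *	cxgb3_register_client(&my_client);	// from module_init
 *	cxgb3_unregister_client(&my_client);	// from module_exit
 */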

/**
 * cxgb3_unregister_client - unregister an offload client
 * @client: the client
 *
 * Remove the client from the client list and call back the client
 * for each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 * cxgb3_add_clients - activate registered clients for an offload device
 * @tdev: the offload device
 *
 * Calls back all registered clients once an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

/**
 * cxgb3_remove_clients - deactivate registered clients for an offload device
 * @tdev: the offload device
 *
 * Calls back all registered clients once an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->event_handler)
			client->event_handler(tdev, event, port);
	}
	mutex_unlock(&cxgb3_db_lock);
}

static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];

		if (ether_addr_equal(dev->dev_addr, mac)) {
			rcu_read_lock();
			if (vlan && vlan != VLAN_VID_MASK) {
				dev = __vlan_find_dev_deep_rcu(dev,
					htons(ETH_P_8021Q), vlan);
			} else if (netif_is_bond_slave(dev)) {
				struct net_device *upper_dev;

				while ((upper_dev =
					netdev_master_upper_dev_get_rcu(dev)))
					dev = upper_dev;
			}
			rcu_read_unlock();
			return dev;
		}
	}
	return NULL;
}

static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int i;
	int ret = 0;
	unsigned int val = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
		for (i = 0; i < 4; i++, val >>= 8)
			uiip->pgsz_factor[i] = val & 0xFF;

		val = t3_read_reg(adapter, A_TP_PARA_REG7);
		uiip->max_txsz =
		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
				     (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		val = min(adapter->params.tp.tx_pg_size,
			  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		uiip->max_txsz = min(val, uiip->max_txsz);

		/* set MaxRxData to 16224 */
		val = t3_read_reg(adapter, A_TP_PARA_REG2);
		if ((val >> S_MAXRXDATA) != 0x3f60) {
			val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
			val |= V_MAXRXDATA(0x3f60);
			pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
				adapter->name, val);
			t3_write_reg(adapter, A_TP_PARA_REG2, val);
		}

		/*
		 * on rx, the iscsi pdu has to be < rx page size and the
		 * max rx data length programmed in TP
		 */
		val = min(adapter->params.tp.rx_pg_size,
			  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
			   S_MAXRXDATA) & M_MAXRXDATA);
		uiip->max_rxsz = min(val, uiip->max_rxsz);
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		/* program the ddp page sizes */
		for (i = 0; i < 4; i++)
			val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
		if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
			pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
				adapter->name, val, uiip->pgsz_factor[0],
				uiip->pgsz_factor[1], uiip->pgsz_factor[2],
				uiip->pgsz_factor[3]);
			t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *rdma = data;
		struct pci_dev *pdev = adapter->pdev;

		rdma->udbell_physbase = pci_resource_start(pdev, 2);
		rdma->udbell_len = pci_resource_len(pdev, 2);
		rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		rdma->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP:{
		unsigned long flags;
		struct rdma_cq_op *rdma = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
					rdma->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM:{
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP:{
		struct rdma_cq_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, rdma->id,
					  rdma->base_addr, rdma->size,
					  ASYNC_NOTIF_RSPQ,
					  rdma->ovfl_mode, rdma->credits,
					  rdma->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP:{
		struct rdma_ctrlqp_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA,
					 ASYNC_NOTIF_RSPQ,
					 rdma->base_addr, rdma->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MIB: {
		spin_lock(&adapter->stats_lock);
		t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	struct ofld_page_info *rx_page_info;
	struct tp_params *tp = &adapter->params.tp;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
	case RDMA_GET_MIB:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	case GET_RX_PAGE_INFO:
		rx_page_info = data;
		rx_page_info->page_size = tp->rx_pg_size;
		rx_page_info->num = tp->rx_num_pgs;
		break;
	case GET_ISCSI_IPV4ADDR: {
		struct iscsi_ipv4addr *p = data;
		struct port_info *pi = netdev_priv(p->dev);
		p->ipv4addr = pi->iscsi_ipv4addr;
		break;
	}
	case GET_EMBEDDED_INFO: {
		struct ch_embedded_info *e = data;

		spin_lock(&adapter->stats_lock);
		t3_get_fw_version(adapter, &e->fw_vers);
		t3_get_tp_version(adapter, &e->tp_vers);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
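
/*
 * Example usage of the ctl hook (a sketch, error handling elided):
 * clients query the offload device through tdev->ctl() with one of the
 * request codes handled above, e.g. to learn the adapter's port list:
 *
 *	struct adap_ports ports;
 *
 *	if (tdev->ctl(tdev, GET_PORTS, &ports) == 0)
 *		pr_debug("offload device has %u ports\n", ports.nports);
 */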

/*
 * Dummy handler for Rx offload packets in case we get an offload packet
 * before proper processing is set up.  This just drops the packets, as it
 * isn't normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL);
		if (!skb)
			skb = td->nofail_skb;
		if (!skb) {
			spin_lock_bh(&td->tid_release_lock);
			p->ctx = (void *)td->tid_release_list;
			td->tid_release_list = p;
			break;
		}
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		if (skb == td->nofail_skb)
			td->nofail_skb =
				alloc_skb(sizeof(struct cpl_tid_release),
					  GFP_KERNEL);
		spin_lock_bh(&td->tid_release_lock);
	}
	td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
	spin_unlock_bh(&td->tid_release_lock);

	if (!td->nofail_skb)
		td->nofail_skb =
			alloc_skb(sizeof(struct cpl_tid_release),
				  GFP_KERNEL);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx || td->release_list_incomplete)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);
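
/*
 * Typical TID lifecycle (a sketch; my_client, conn_ctx and hwtid are
 * hypothetical names): a client reserves an atid before sending an
 * active-open request, binds the hardware TID it gets back with
 * cxgb3_insert_tid(), then releases both when the connection dies.
 *
 *	int atid = cxgb3_alloc_atid(tdev, &my_client, conn_ctx);
 *
 *	if (atid < 0)
 *		return -ENOMEM;			// table exhausted
 *	...
 *	// on CPL_ACT_ESTABLISH: transition atid -> hardware tid
 *	cxgb3_insert_tid(tdev, &my_client, conn_ctx, hwtid);
 *	conn_ctx = cxgb3_free_atid(tdev, atid);
 *	...
 *	cxgb3_remove_tid(tdev, conn_ctx, hwtid);	// on teardown
 */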

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);

static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rte_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		pr_err("%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct
							   cpl_abort_rpl),
						    GFP_ATOMIC);
		if (!reply_skb) {
			pr_err("do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		pr_err("%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}

/*
 * That skb would better have come from process_responses() where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, the things might get really interesting here.
 */

static inline u32 get_hwtid(struct sk_buff *skb)
{
	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
	return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = get_hwtid(skb);
	unsigned int opcode = get_opcode(skb);
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE):{
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_REDIRECT):{
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->new, nr->neigh,
			      nr->daddr);
		cxgb_neigh_update(nr->neigh);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		pr_err("T3C: handler registration for opcode %x failed\n",
		       opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);
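
/*
 * Example (a sketch): a module that wants to intercept a CPL opcode
 * supplies a cpl_handler_func and registers it; returning
 * CPL_RET_BUF_DONE tells process_rx() below to free the skb.
 * my_rx_data is a hypothetical handler name.
 *
 *	static int my_rx_data(struct t3cdev *dev, struct sk_buff *skb)
 *	{
 *		// consume the payload here
 *		return CPL_RET_BUF_DONE;
 *	}
 *
 *	t3_register_cpl_handler(CPL_RX_DATA, my_rx_data);
 *	t3_register_cpl_handler(CPL_RX_DATA, NULL);	// unregister
 */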

/*
 * T3CDEV's receive method.
 */
static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = get_opcode(skb);
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
			       dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);
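
/*
 * Example (a sketch, mirroring what t3_process_tid_release_list() does
 * above): sending a control-plane CPL message means allocating an skb,
 * filling in the work request header and opcode/tid, then handing it off:
 *
 *	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_tid_release),
 *					GFP_KERNEL);
 *
 *	if (skb) {
 *		mk_tid_release(skb, tid);
 *		cxgb3_ofld_send(tdev, skb);
 *	}
 */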

static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

static void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev;

	if (!neigh)
		return;
	dev = neigh->dev;
	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		pr_err("%s: cannot allocate skb!\n", __func__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}

static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
			  struct neighbour *neigh,
			  const void *daddr)
{
	struct net_device *dev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	dev = neigh->dev;

	if (!is_offloading(dev))
		return;
	tdev = dev2t3cdev(dev);
	BUG_ON(!tdev);

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new, dev, daddr);
	if (!e) {
		pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				rcu_read_lock();
				l2t_hold(L2DATA(tdev), e);
				rcu_read_unlock();
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(tdev, e);
}

/*
 * Allocate and initialize the TID tables.  The TID, server TID and
 * active-open TID tables share a single contiguous allocation, laid out
 * in that order.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

static void free_tid_maps(struct tid_info *t)
{
	kvfree(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;
	struct l2t_data *l2td;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	l2td = t3_init_l2t(l2t_capacity);
	if (!l2td)
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	RCU_INIT_POINTER(dev->l2opt, l2td);
	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	kvfree(l2td);
out_free:
	kfree(t);
	return err;
}

static void clean_l2_data(struct rcu_head *head)
{
	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
	kvfree(d);
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);
	struct l2t_data *d;

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	rcu_read_lock();
	d = L2DATA(tdev);
	rcu_read_unlock();
	RCU_INIT_POINTER(tdev->l2opt, NULL);
	call_rcu(&d->rcu_head, clean_l2_data);
	kfree_skb(t->nofail_skb);
	kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}

void cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adap2type(adapter);

	register_tdev(tdev);
}

void cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}