// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_netvf_main.c
 * Netronome virtual function network device driver: Main entry point
 * Author: Jason McMullan <jason.mcmullan@netronome.com>
 *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/etherdevice.h>

#include "nfpcore/nfp_dev.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"

/**
 * struct nfp_net_vf - NFP VF-specific device structure
 * @nn:		NFP Net structure for this device
 * @irq_entries: Pre-allocated array of MSI-X entries
 * @q_bar:	Pointer to mapped QC memory (NULL if TX/RX mapped directly)
 * @ddir:	Per-device debugfs directory
 */
struct nfp_net_vf {
	struct nfp_net *nn;

	struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
				      NFP_NET_MAX_TX_RINGS];
	u8 __iomem *q_bar;

	struct dentry *ddir;
};

static const char nfp_net_driver_name[] = "nfp_netvf";

static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP3800_VF,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP6000_VF,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP3800_VF,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP6000_VF,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
	},
	{ 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids);

static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
{
	u8 mac_addr[ETH_ALEN];

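	/* Read the MAC address out of the vNIC control BAR; if it is not a
	 * valid unicast address, fall back to a random one instead.
	 */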
	put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]);
	put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);

	if (!is_valid_ether_addr(mac_addr)) {
		eth_hw_addr_random(nn->dp.netdev);
		return;
	}

	eth_hw_addr_set(nn->dp.netdev, mac_addr);
	ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}

static int nfp_netvf_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *pci_id)
{
	const struct nfp_dev_info *dev_info;
	struct nfp_net_fw_version fw_ver;
	int max_tx_rings, max_rx_rings;
	u32 tx_bar_off, rx_bar_off;
	u32 tx_bar_sz, rx_bar_sz;
	int tx_bar_no, rx_bar_no;
	struct nfp_net_vf *vf;
	unsigned int num_irqs;
	u8 __iomem *ctrl_bar;
	struct nfp_net *nn;
	u32 startq;
	int stride;
	int err;

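	/* The driver_data value from the matched PCI ID entry selects the
	 * NFP3800 vs NFP6000 VF parameters in the nfp_dev_info table.
	 */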
	dev_info = &nfp_dev_info[pci_id->driver_data];

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;
	pci_set_drvdata(pdev, vf);

	err = pci_enable_device_mem(pdev);
	if (err)
		goto err_free_vf;

	err = pci_request_regions(pdev, nfp_net_driver_name);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate device memory.\n");
		goto err_pci_disable;
	}

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
	if (err)
		goto err_pci_regions;

	/* Map the Control BAR.
	 *
	 * Irrespective of the advertised BAR size we only map the
	 * first NFP_NET_CFG_BAR_SZ of the BAR.  This keeps the code
	 * identical for PF and VF drivers.
	 */
	ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
			   NFP_NET_CFG_BAR_SZ);
	if (!ctrl_bar) {
		dev_err(&pdev->dev,
			"Failed to map resource %d\n", NFP_NET_CTRL_BAR);
		err = -EIO;
		goto err_pci_regions;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
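	/* Only the generic firmware ABI class, with no reserved extend bits
	 * set, is supported by this driver.
	 */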
	if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
	    fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.extend, fw_ver.class,
			fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		tx_bar_no = NFP_NET_Q0_BAR;
		rx_bar_no = NFP_NET_Q1_BAR;
		dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			tx_bar_no = NFP_NET_Q0_BAR;
			rx_bar_no = tx_bar_no;
			break;
		default:
			dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.extend, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find out how many rings are supported */
	max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride;
	rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride;

	/* Sanity checks */
	if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) {
		dev_err(&pdev->dev,
			"TX BAR too small for number of TX rings. Adjusting\n");
		tx_bar_sz = pci_resource_len(pdev, tx_bar_no);
		max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}
	if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) {
		dev_err(&pdev->dev,
			"RX BAR too small for number of RX rings. Adjusting\n");
		rx_bar_sz = pci_resource_len(pdev, rx_bar_no);
		max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}

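	/* Translate the first TX/RX queue indices advertised by the firmware
	 * into byte offsets within the queue controller BAR.
	 */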
	startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
	startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar_off = nfp_qcp_queue_offset(dev_info, startq);

	/* Allocate and initialise the netdev */
	nn = nfp_net_alloc(pdev, dev_info, ctrl_bar, true,
			   max_tx_rings, max_rx_rings);
	if (IS_ERR(nn)) {
		err = PTR_ERR(nn);
		goto err_ctrl_unmap;
	}
	vf->nn = nn;

	nn->dp.is_vf = 1;
	nn->stride_tx = stride;
	nn->stride_rx = stride;

	if (rx_bar_no == tx_bar_no) {
		u32 bar_off, bar_sz;
		resource_size_t map_addr;

		/* Make a single overlapping BAR mapping */
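		/* The combined window starts at the lower of the two queue
		 * offsets and extends to the end of whichever range finishes
		 * last.
		 */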
		if (tx_bar_off < rx_bar_off)
			bar_off = tx_bar_off;
		else
			bar_off = rx_bar_off;

		if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz))
			bar_sz = (tx_bar_off + tx_bar_sz) - bar_off;
		else
			bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;

		map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
		vf->q_bar = ioremap(map_addr, bar_sz);
		if (!vf->q_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* TX queues */
		nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off);
		/* RX queues */
		nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off);
	} else {
		resource_size_t map_addr;

		/* TX queues */
		map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
		nn->tx_bar = ioremap(map_addr, tx_bar_sz);
		if (!nn->tx_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* RX queues */
		map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
		nn->rx_bar = ioremap(map_addr, rx_bar_sz);
		if (!nn->rx_bar) {
			nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
			err = -EIO;
			goto err_unmap_tx;
		}
	}

	nfp_netvf_get_mac_addr(nn);

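	/* Request MSI-X vectors for the non-queue vectors plus one per ring
	 * vector; NFP_NET_MIN_VNIC_IRQS is the smallest allocation we will
	 * accept.
	 */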
	num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS,
				      NFP_NET_NON_Q_VECTORS +
				      nn->dp.num_r_vecs);
	if (!num_irqs) {
		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -EIO;
		goto err_unmap_rx;
	}
	nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);

	err = nfp_net_init(nn);
	if (err)
		goto err_irqs_disable;

	nfp_net_info(nn);
	vf->ddir = nfp_net_debugfs_device_add(pdev);
	nfp_net_debugfs_vnic_add(nn, vf->ddir);

	return 0;

err_irqs_disable:
	nfp_net_irqs_disable(pdev);
err_unmap_rx:
	if (!vf->q_bar)
		iounmap(nn->rx_bar);
err_unmap_tx:
	if (!vf->q_bar)
		iounmap(nn->tx_bar);
	else
		iounmap(vf->q_bar);
err_netdev_free:
	nfp_net_free(nn);
err_ctrl_unmap:
	iounmap(ctrl_bar);
err_pci_regions:
	pci_release_regions(pdev);
err_pci_disable:
	pci_disable_device(pdev);
err_free_vf:
	pci_set_drvdata(pdev, NULL);
	kfree(vf);
	return err;
}

static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
	struct nfp_net_vf *vf;
	struct nfp_net *nn;

	vf = pci_get_drvdata(pdev);
	if (!vf)
		return;

	nn = vf->nn;

	/* Note, the order is slightly different from above as we need
	 * to keep the nn pointer around till we have freed everything.
	 */
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_debugfs_dir_clean(&vf->ddir);

	nfp_net_clean(nn);

	nfp_net_irqs_disable(pdev);

	if (!vf->q_bar) {
		iounmap(nn->rx_bar);
		iounmap(nn->tx_bar);
	} else {
		iounmap(vf->q_bar);
	}
	iounmap(nn->dp.ctrl_bar);

	nfp_net_free(nn);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);
	kfree(vf);
}

struct pci_driver nfp_netvf_pci_driver = {
	.name = nfp_net_driver_name,
	.id_table = nfp_netvf_pci_device_ids,
	.probe = nfp_netvf_pci_probe,
	.remove = nfp_netvf_pci_remove,
	.shutdown = nfp_netvf_pci_remove,
};