// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <net/dst.h>

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#define OCTEON_MAX_MTU 65392

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
	"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
	"\twill be configured to send incoming packets to multiple POW\n"
	"\tgroups. pow_receive_group parameter is ignored when multiple\n"
	"\tgroups are taken into use and groups are allocated starting\n"
	"\tfrom 0. By default, a single group is used.\n");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/* Mask indicating which receive groups are in use. */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

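/*
 * Per-port periodic work: run the interface's link poll callback (if
 * any) and pull the hardware counters into the netdev statistics, then
 * re-arm itself unless the driver is shutting down.
 */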
static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
			(cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}

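/*
 * One-time hardware setup shared by all ports: enable the FPA, seed the
 * packet and work-queue-entry pools, switch IPD to little-endian layout
 * when built for little-endian, and configure RED thresholds based on
 * num_packet_buffers.
 */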
static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	struct cvmx_wqe *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev: Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		dev->stats.rx_packets += rx_status.inb_packets;
		dev->stats.tx_packets += tx_status.packets;
		dev->stats.rx_bytes += rx_status.inb_octets;
		dev->stats.tx_bytes += tx_status.octets;
		dev->stats.multicast += rx_status.multicast_packets;
		dev->stats.rx_crc_errors += rx_status.inb_errors;
		dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		dev->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &dev->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev: Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	int vlan_bytes = VLAN_HLEN;
#else
	int vlan_bytes = 0;
#endif
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int index = INDEX(priv->port);
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + mtu_overhead;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev: Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		int index = INDEX(priv->port);

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

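/*
 * Program the interface MAC address into the GMX SMAC register and the
 * per-byte address-filter CAM. The port is briefly disabled while the
 * registers are updated, and the multicast filter is reprogrammed as
 * part of the update.
 */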
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		const u8 *ptr = dev->dev_addr;
		u64 mac = 0;
		int index = INDEX(priv->port);

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev: The device in question.
 * @addr: Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int ret;

	ret = of_get_ethdev_address(priv->of_node, dev);
	if (ret)
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1) &&
	    (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev_set_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

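/* Undo cvm_oct_common_init: detach from the PHY if one was attached. */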
void cvm_oct_common_uninit(struct net_device *dev)
{
	if (dev->phydev)
		phy_disconnect(dev->phydev);
}

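/*
 * Shared ndo_open helper: attach the PHY (when one is described), enable
 * the GMX port, and establish the initial carrier state either from the
 * PHY or from the helper link status, falling back to the supplied
 * link_poll callback when no PHY is present.
 */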
int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	union cvmx_helper_link_info link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

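/*
 * Default link poller for interfaces without a PHY: read the current
 * link state from the helper, push it to the hardware if it changed,
 * and update the netdev carrier state accordingly.
 */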
void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_helper_link_info link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	if (cvmx_helper_link_set(priv->port, link_info))
		link_info.u64 = priv->link_info;
	else
		priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

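/*
 * One net_device_ops table per interface type. They share the common
 * init/stats/MTU handlers and differ mainly in the open/stop hooks and
 * the transmit path (hardware PKO queues vs. the POW virtual device).
 */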
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_xaui_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init = cvm_oct_sgmii_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_sgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init = cvm_oct_spi_init,
	.ndo_uninit = cvm_oct_spi_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_rgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_start_xmit = cvm_oct_xmit_pow,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

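/*
 * Find the child of "parent" whose "reg" property matches reg_val.
 * Returns the node with a reference held, or NULL if none matches.
 */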
static struct device_node *cvm_oct_of_get_child
				(const struct device_node *parent, int reg_val)
{
	struct device_node *node;
	const __be32 *addr;
	int size;

	for_each_child_of_node(parent, node) {
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

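/*
 * Map an (interface, port) pair to its device tree node underneath the
 * PIP node, matching on the "reg" property at each level.
 */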
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

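/*
 * Apply optional "rx-delay"/"tx-delay" overrides from the device tree to
 * the ASX clock-delay registers, then select the RGMII phy_mode variant
 * that tells the PHY which delays the MAC is not already providing.
 */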
static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
				int port)
{
	struct device_node *np = priv->of_node;
	u32 delay_value;
	bool rx_delay;
	bool tx_delay;

	/* By default, both RX/TX delay is enabled in
	 * __cvmx_helper_rgmii_enable().
	 */
	rx_delay = true;
	tx_delay = true;

	if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
		rx_delay = delay_value > 0;
	}
	if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
		tx_delay = delay_value > 0;
	}

	if (!rx_delay && !tx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
	else if (!rx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
	else if (!tx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
	else
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
}

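/*
 * Probe: set up the shared packet I/O hardware, route incoming packets
 * to the configured POW group(s), create the optional pow0 device, and
 * allocate and register one net_device per physical port found on each
 * interface.
 */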
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
			} else {
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strscpy(dev->name, "pow%d", sizeof(dev->name));
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			priv->phy_mode = PHY_INTERFACE_MODE_NA;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strscpy(dev->name, "npi%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strscpy(dev->name, "xaui%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strscpy(dev->name, "loop%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strscpy(dev->name, "spi%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->phy_mode = PHY_INTERFACE_MODE_GMII;
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				cvm_set_rgmii_delay(priv, interface,
						    port_index);
				break;
			}

			if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
				if (of_phy_register_fixed_link(priv->of_node)) {
					netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
						   interface, priv->port);
					dev->netdev_ops = NULL;
				}
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
					cvmx_pko_get_num_queues(priv->port) *
					sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}

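/*
 * Remove: stop packet input, cancel the deferred work, shut down the RX
 * and TX paths, unregister and free every net_device, and return the
 * buffers that were placed in the hardware pools during probe.
 */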
static void cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	cvmx_ipd_disable();

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe = cvm_oct_probe,
	.remove_new = cvm_oct_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_SOFTDEP("pre: mdio-cavium");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");