// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
#include "aq_filters.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/pkt_cls.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

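/* Initialize RSS with a fixed default hash key and an indirection table
 * spread evenly across the RSS queues. The "i & (num_rss_queues - 1)"
 * masking relies on num_rss_queues being a power of two, which the caller
 * arranges by rounding the vector count down to a power of two before
 * deriving it.
 */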
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params;
	int i = 0;

	rss_params = &cfg->aq_rss;

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Recalculate the number of vectors */
static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	cfg->vecs = rounddown_pow_of_two(cfg->vecs);

	if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
		if (cfg->tcs > 2)
			cfg->vecs = min(cfg->vecs, 4U);
	}

	if (cfg->vecs <= 4)
		cfg->tc_mode = AQ_TC_MODE_8TCS;
	else
		cfg->tc_mode = AQ_TC_MODE_4TCS;

	/* RSS rings */
	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
	aq_nic_rss_init(self, cfg->num_rss_queues);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	int i;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->fc.req = AQ_CFG_FC_MODE;
	cfg->wol = AQ_CFG_WOL_MODES;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;
	cfg->is_ptp = true;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	aq_nic_cfg_update_num_vecs(self);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * link status IRQ. If no - we'll know link state from
	 * slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
	cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
	cfg->is_vlan_force_promisc = true;

	for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
		cfg->prio_tc_map[i] = cfg->tcs * i / 8;
}

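/* Query the FW for the current link state and flow control, then propagate
 * any change: re-tune interrupt moderation, resync PTP, reprogram RX flow
 * control, and raise or drop the carrier on the net device accordingly.
 */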
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->aq_fw_ops->get_flow_control)
		self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
	self->aq_nic_cfg.fc.cur = fc;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		netdev_info(self->ndev, "%s: link change old %d new %d\n",
			    AQ_CFG_DRV_NAME, self->link_status.mbps,
			    self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		if (self->aq_ptp) {
			aq_ptp_clock_init(self);
			aq_ptp_tm_offset_set(self,
					     self->aq_hw->aq_link_status.mbps);
			aq_ptp_link_change(self);
		}

		/* Driver has to update flow control settings on RX block
		 * on any link event.
		 * We should query FW whether it negotiated FC.
		 */
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_enable(self);
#endif
		if (self->aq_hw_ops->hw_tc_rate_limit_set)
			self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);

		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}

	return 0;
}

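/* Threaded handler for the dedicated link-state interrupt: refresh the
 * link status, then re-enable the link IRQ vector in hardware.
 */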
static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));

	return IRQ_HANDLED;
}

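/* Periodic service work, scheduled by the service timer: PTP housekeeping,
 * link-state refresh, MACsec handling (when enabled) and a statistics
 * update pulled from firmware under fwreq_mutex.
 */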
static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	aq_ptp_service_task(self);

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_work(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	unsigned int i = 0U;

	for (i = 0U; self->aq_vecs > i; ++i)
		aq_vec_isr(i, (void *)self->aq_vec[i]);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

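/* Soft-reset the hardware and detect the firmware interface; on success
 * self->aq_fw_ops points at the matching firmware ops table.
 */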
static int aq_nic_hw_prepare(struct aq_nic_s *self)
{
	int err = 0;

	err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
	if (err)
		goto exit;

	err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);

exit:
	return err;
}

static bool aq_nic_is_valid_ether_addr(const u8 *addr)
{
	/* Some engineering samples of Aquantia NICs are provisioned with a
	 * partially populated MAC, which is still invalid.
	 */
	return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
}

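/* One-time netdev registration: prepare the hardware, choose a MAC address
 * (DT/platform property first, then the permanent MAC from firmware, with a
 * random fallback), allocate the interrupt vectors and register the device.
 */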
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	u8 addr[ETH_ALEN];
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = aq_nic_hw_prepare(self);
	if (err)
		goto err_exit;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_init(self);
#endif

	if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
		// If DT has none or an invalid one, ask device for MAC address
		mutex_lock(&self->fwreq_mutex);
		err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
		mutex_unlock(&self->fwreq_mutex);

		if (err)
			goto err_exit;

		if (is_valid_ether_addr(addr) &&
		    aq_nic_is_valid_ether_addr(addr)) {
			eth_hw_addr_set(self->ndev, addr);
		} else {
			netdev_warn(self->ndev, "MAC is invalid, will use random.");
			eth_hw_addr_random(self->ndev);
		}
	}

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		eth_hw_addr_set(self->ndev, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
			aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
#if IS_ENABLED(CONFIG_MACSEC)
	if (err)
		aq_macsec_free(self);
#endif
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG |
				     NETIF_F_LRO | NETIF_F_TSO | NETIF_F_TSO6;
	self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;

	self->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
				   NETDEV_XDP_ACT_REDIRECT |
				   NETDEV_XDP_ACT_NDO_XMIT |
				   NETDEV_XDP_ACT_RX_SG |
				   NETDEV_XDP_ACT_NDO_XMIT_SG;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

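/* (Re)initialize the NIC: reset the hardware, restore PHY-related settings,
 * program the MAC address, allocate and init the per-vector rings and,
 * if enabled, the PTP block and its rings.
 */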
int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;
	/* Restore default settings */
	aq_nic_set_downshift(self, self->aq_nic_cfg.downshift_counter);
	aq_nic_set_media_detect(self, self->aq_nic_cfg.is_media_detect ?
				AQ_HW_MEDIA_DETECT_CNT : 0);

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
	    self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
		self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
		err = aq_phy_init(self->aq_hw);

		/* Disable the PTP on NICs where it's known to cause datapath
		 * problems.
		 * Ideally this should have been done by PHY provisioning, but
		 * many units have been shipped with enabled PTP block already.
		 */
		if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
			if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
				aq_phy_disable_ptp(self->aq_hw);
	}

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		err = aq_vec_ring_alloc(aq_vec, self, i,
					aq_nic_get_cfg(self));
		if (err)
			goto err_exit;

		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
	}

	if (aq_nic_get_cfg(self)->is_ptp) {
		err = aq_ptp_init(self, self->irqvecs - 1);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_alloc(self);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_init(self);
		if (err < 0)
			goto err_exit;
	}

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

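/* Bring the datapath up: program RX filters, start the rings and the
 * hardware, kick off the service timer, attach interrupts (or the polling
 * timer in polling mode), then publish the TC/queue layout to the stack
 * and wake the TX queues.
 */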
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_nic_cfg_s *cfg;
	unsigned int i = 0U;
	int err = 0;

	cfg = aq_nic_get_cfg(self);

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U; self->aq_vecs > i; ++i) {
		aq_vec = self->aq_vec[i];
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = aq_ptp_ring_start(self);
	if (err < 0)
		goto err_exit;

	aq_nic_set_loopback(self);

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (cfg->is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U; self->aq_vecs > i; ++i) {
			aq_vec = self->aq_vec[i];
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = aq_ptp_irq_alloc(self);
		if (err < 0)
			goto err_exit;

		if (cfg->link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    cfg->link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED | IRQF_ONESHOT,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << cfg->link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	for (i = 0; i < cfg->tcs; i++) {
		u16 offset = self->aq_vecs * i;

		netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
	}
	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

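/* Map an XDP frame (head plus any fragments) into TX descriptors. Returns
 * the number of descriptors consumed, or 0 on a DMA mapping error, in which
 * case all mappings taken so far are unwound.
 */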
static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
				   struct xdp_frame *xdpf,
				   struct aq_ring_s *ring)
{
	struct device *dev = aq_nic_get_dev(self);
	struct aq_ring_buff_s *first = NULL;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *dx_buff;
	struct skb_shared_info *sinfo;
	unsigned int frag_count = 0U;
	unsigned int nr_frags = 0U;
	unsigned int ret = 0U;
	u16 total_len;

	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	total_len = xdpf->len;
	dx_buff->len = total_len;
	if (xdp_frame_has_frags(xdpf)) {
		nr_frags = sinfo->nr_frags;
		total_len += sinfo->xdp_frags_size;
	}
	dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = total_len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	for (; nr_frags--; ++frag_count) {
		skb_frag_t *frag = &sinfo->frags[frag_count];
		unsigned int frag_len = skb_frag_size(frag);
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
						   buff_size, DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(dev, frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = NULL;
	dx_buff->xdpf = xdpf;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->pa)
			continue;
		if (unlikely(dx_buff->is_sop))
			dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
				       DMA_TO_DEVICE);
	}

exit:
	return ret;
}

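/* Map an skb into TX descriptors, inserting an extra context descriptor
 * when GSO and/or VLAN insertion is needed. Returns the number of
 * descriptors consumed, or 0 on a DMA mapping error after unwinding any
 * partial mappings.
 */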
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	struct device *dev = aq_nic_get_dev(self);
	struct aq_ring_buff_s *first = NULL;
	u8 ipver = ip_hdr(skb)->version;
	struct aq_ring_buff_s *dx_buff;
	bool need_context_tag = false;
	unsigned int frag_count = 0U;
	unsigned int ret = 0U;
	unsigned int dx;
	u8 l4proto = 0;

	if (ipver == 4)
		l4proto = ip_hdr(skb)->protocol;
	else if (ipver == 6)
		l4proto = ipv6_hdr(skb)->nexthdr;

	dx = ring->sw_tail;
	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		if (l4proto == IPPROTO_TCP) {
			dx_buff->is_gso_tcp = 1U;
			dx_buff->len_l4 = tcp_hdrlen(skb);
		} else if (l4proto == IPPROTO_UDP) {
			dx_buff->is_gso_udp = 1U;
			dx_buff->len_l4 = sizeof(struct udphdr);
			/* UDP GSO Hardware does not replace packet length. */
			udp_hdr(skb)->len = htons(dx_buff->mss +
						  dx_buff->len_l4);
		} else {
			WARN_ONCE(true, "Bad GSO mode");
			goto exit;
		}
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = skb_network_header_len(skb);
		dx_buff->eop_index = 0xffffU;
		dx_buff->is_ipv6 = (ipver == 6);
		need_context_tag = true;
	}

	if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
		dx_buff->len_pkt = skb->len;
		dx_buff->is_vlan = 1U;
		need_context_tag = true;
	}

	if (need_context_tag) {
		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		dx_buff->flags = 0U;
		++ret;
	}

	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(dev,
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
		ret = 0;
		goto exit;
	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
		dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
		dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(dev,
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(dev,
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	dx_buff->xdpf = NULL;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
		    !dx_buff->is_vlan && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(dev,
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev,
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

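/* Transmit a single XDP frame on the given TX ring under the netdev queue
 * lock. Returns NETDEV_TX_BUSY if the frame has too many fragments, the
 * queue is stopped, or mapping fails.
 */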
int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
		     struct xdp_frame *xdpf)
{
	u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
	struct net_device *ndev = aq_nic_get_ndev(aq_nic);
	struct skb_shared_info *sinfo;
	int cpu = smp_processor_id();
	int err = NETDEV_TX_BUSY;
	struct netdev_queue *nq;
	unsigned int frags = 1;

	if (xdp_frame_has_frags(xdpf)) {
		sinfo = xdp_get_shared_info_from_frame(xdpf);
		frags += sinfo->nr_frags;
	}

	if (frags > AQ_CFG_SKB_FRAGS_MAX)
		return err;

	nq = netdev_get_tx_queue(ndev, tx_ring->idx);
	__netif_tx_lock(nq, cpu);

	aq_ring_update_queue_state(tx_ring);

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
		goto out;

	frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
	if (likely(frags))
		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
							 frags);
out:
	__netif_tx_unlock(nq);

	return err;
}

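/* Main transmit path: pick the TX ring from the skb queue mapping
 * (TC-major layout), map the skb and hand it to the hardware. Frames with
 * too many fragments are dropped; a stopped queue reports NETDEV_TX_BUSY.
 */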
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	unsigned int vec = skb->queue_mapping % cfg->vecs;
	unsigned int tc = skb->queue_mapping / cfg->vecs;
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev,
				     AQ_NIC_RING2QMAP(self, ring->idx))) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		}
	}

	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev) {
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
			}
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		self->mc_list.count = i;
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}

	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return -EOPNOTSUPP;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return 0;

	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

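/* Fill an ethtool statistics buffer: the fixed MAC/DMA counters first,
 * followed by the per-TC, per-vector software ring stats. Returns the
 * pointer advanced past the data written.
 */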
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_stats_s *stats;
	unsigned int count = 0U;
	unsigned int i = 0U;
	unsigned int tc;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	if (stats->brc)
		data[++i] = stats->brc;
	else
		data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	if (stats->btc)
		data[++i] = stats->btc;
	else
		data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
		for (i = 0U; self->aq_vecs > i; ++i) {
			if (!self->aq_vec[i])
				break;
			data += count;
			count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
		}
	}

	data += count;

err_exit:
	return data;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
	struct net_device *ndev = self->ndev;

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	u32 lp_link_speed_msk;

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	if (self->link_status.mbps)
		cmd->base.duplex = self->link_status.full_duplex ?
				   DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Asym_Pause);
	}

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Half);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Half);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Half);

	if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
	lp_link_speed_msk = self->aq_hw->aq_link_status.lp_link_speed_msk;

	if (lp_link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     10000baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     5000baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     2500baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     1000baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     1000baseT_Half);

	if (lp_link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     100baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     100baseT_Half);

	if (lp_link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     10baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     10baseT_Half);

	if (self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     Pause);
	if (!!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     Asym_Pause);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	int fduplex = (cmd->base.duplex == DUPLEX_FULL);
	u32 speed = cmd->base.speed;
	u32 rate = 0U;
	int err = 0;

	if (!fduplex && speed > SPEED_1000) {
		err = -EINVAL;
		goto err_exit;
	}

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		switch (speed) {
		case SPEED_10:
			rate = fduplex ? AQ_NIC_RATE_10M : AQ_NIC_RATE_10M_HALF;
			break;

		case SPEED_100:
			rate = fduplex ? AQ_NIC_RATE_100M
				       : AQ_NIC_RATE_100M_HALF;
			break;

		case SPEED_1000:
			rate = fduplex ? AQ_NIC_RATE_1G : AQ_NIC_RATE_1G_HALF;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2G5;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}

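/* Apply every loopback mode selected in priv_flags: DMA and packet
 * loopbacks via the hardware ops, PHY internal/external loopbacks via the
 * firmware ops.
 */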
int aq_nic_set_loopback(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_hw_ops->hw_set_loopback ||
	    !self->aq_fw_ops->set_phyloopback)
		return -EOPNOTSUPP;

	mutex_lock(&self->fwreq_mutex);
	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PKT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PKT_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_NET,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_NET)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYINT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYEXT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
	mutex_unlock(&self->fwreq_mutex);

	return 0;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	aq_ptp_irq_free(self);

	for (i = 0U; self->aq_vecs > i; ++i)
		aq_vec_stop(self->aq_vec[i]);

	aq_ptp_ring_stop(self);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_set_power(struct aq_nic_s *self)
{
	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}
}

void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		aq_vec_deinit(aq_vec);
		aq_vec_ring_free(aq_vec);
	}

	aq_ptp_unregister(self);
	aq_ptp_ring_deinit(self);
	aq_ptp_ring_free(self);
	aq_ptp_free(self);

	if (likely(self->aq_fw_ops->deinit) && link_down) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_realloc_vectors(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);

	aq_nic_free_vectors(self);

	for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
							   cfg);
		if (unlikely(!self->aq_vec[self->aq_vecs]))
			return -ENOMEM;
	}

	return 0;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
	aq_nic_set_power(self);

err_exit:
	rtnl_unlock();
}

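/* Reserve a location for a new RX filter of the given type. Locations are
 * handed out from the top of each type's range downwards; 0xFF is returned
 * for unsupported types.
 */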
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
{
	u8 location = 0xFF;
	u32 fltr_cnt;
	u32 n_bit;

	switch (type) {
	case aq_rx_filter_ethertype:
		location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
			   self->aq_hw_rx_fltrs.fet_reserved_count;
		self->aq_hw_rx_fltrs.fet_reserved_count++;
		break;
	case aq_rx_filter_l3l4:
		fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
		n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;

		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
		self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
		location = n_bit;
		break;
	default:
		break;
	}

	return location;
}

void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
			   u32 location)
{
	switch (type) {
	case aq_rx_filter_ethertype:
		self->aq_hw_rx_fltrs.fet_reserved_count--;
		break;
	case aq_rx_filter_l3l4:
		self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
		break;
	default:
		break;
	}
}

int aq_nic_set_downshift(struct aq_nic_s *self, int val)
{
	int err = 0;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_fw_ops->set_downshift)
		return -EOPNOTSUPP;

	if (val > 15) {
		netdev_err(self->ndev, "downshift counter should be <= 15\n");
		return -EINVAL;
	}
	cfg->downshift_counter = val;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_downshift(self->aq_hw, cfg->downshift_counter);
	mutex_unlock(&self->fwreq_mutex);

	return err;
}

int aq_nic_set_media_detect(struct aq_nic_s *self, int val)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	int err = 0;

	if (!self->aq_fw_ops->set_media_detect)
		return -EOPNOTSUPP;

	if (val > 0 && val != AQ_HW_MEDIA_DETECT_CNT) {
		netdev_err(self->ndev, "EDPD on this device could have only fixed value of %d\n",
			   AQ_HW_MEDIA_DETECT_CNT);
		return -EINVAL;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_media_detect(self->aq_hw, !!val);
	mutex_unlock(&self->fwreq_mutex);

	/* msecs plays no role - configuration is always fixed in PHY */
	if (!err)
		cfg->is_media_detect = !!val;

	return err;
}

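/* Apply an mqprio TC configuration: close the interface if it is running,
 * update the prio-to-TC map, auto-disable PTP when the TC count leaves no
 * room for its queue, reallocate vectors if their count changed, and
 * reopen the interface.
 */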
int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	const unsigned int prev_vecs = cfg->vecs;
	bool ndev_running;
	int err = 0;
	int i;

	/* Nothing to do if the requested configuration matches the current
	 * one, or if this is a disable request (tcs == 0) and QoS is already
	 * disabled.
	 */
	if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
		return 0;

	ndev_running = netif_running(self->ndev);
	if (ndev_running)
		dev_close(self->ndev);

	cfg->tcs = tcs;
	if (cfg->tcs == 0)
		cfg->tcs = 1;
	if (prio_tc_map)
		memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
	else
		for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
			cfg->prio_tc_map[i] = cfg->tcs * i / 8;

	cfg->is_qos = !!tcs;
	cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
	if (!cfg->is_ptp)
		netdev_warn(self->ndev, "%s\n",
			    "PTP is auto disabled due to requested TC count.");

	netdev_set_num_tc(self->ndev, cfg->tcs);

	/* Changing the number of TCs might change the number of vectors */
	aq_nic_cfg_update_num_vecs(self);
	if (prev_vecs != cfg->vecs) {
		err = aq_nic_realloc_vectors(self);
		if (err)
			goto err_exit;
	}

	if (ndev_running)
		err = dev_open(self->ndev, NULL);

err_exit:
	return err;
}

int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 max_rate)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (max_rate && max_rate < 10) {
		netdev_warn(self->ndev,
			    "Setting %s to the minimum usable value of %dMbps.\n",
			    "max rate", 10);
		cfg->tc_max_rate[tc] = 10;
	} else {
		cfg->tc_max_rate[tc] = max_rate;
	}

	return 0;
}

int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 min_rate)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (min_rate)
		set_bit(tc, &cfg->tc_min_rate_msk);
	else
		clear_bit(tc, &cfg->tc_min_rate_msk);

	if (min_rate && min_rate < 20) {
		netdev_warn(self->ndev,
			    "Setting %s to the minimum usable value of %dMbps.\n",
			    "min rate", 20);
		cfg->tc_min_rate[tc] = 20;
	} else {
		cfg->tc_min_rate[tc] = min_rate;
	}

	return 0;
}