// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"

#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true, \
        .op64bit = false, \
        .msix_irqs = 4U, \
        .irq_mask = ~0U, \
        .vecs = HW_ATL_A0_RSS_MAX, \
        .tcs_max = HW_ATL_A0_TC_MAX, \
        .rxd_alignment = 1U, \
        .rxd_size = HW_ATL_A0_RXD_SIZE, \
        .rxds_max = HW_ATL_A0_MAX_RXD, \
        .rxds_min = HW_ATL_A0_MIN_RXD, \
        .txd_alignment = 1U, \
        .txd_size = HW_ATL_A0_TXD_SIZE, \
        .txds_max = HW_ATL_A0_MAX_TXD, \
        .txds_min = HW_ATL_A0_MIN_TXD, \
        .txhwb_alignment = 4096U, \
        .tx_rings = HW_ATL_A0_TX_RINGS, \
        .rx_rings = HW_ATL_A0_RX_RINGS, \
        .hw_features = NETIF_F_HW_CSUM | \
                        NETIF_F_RXHASH | \
                        NETIF_F_RXCSUM | \
                        NETIF_F_SG | \
                        NETIF_F_TSO | \
                        NETIF_F_NTUPLE | \
                        NETIF_F_HW_VLAN_CTAG_FILTER, \
        .hw_priv_flags = IFF_UNICAST_FLT, \
        .flow_control = true, \
        .mtu = HW_ATL_A0_MTU_JUMBO, \
        .mac_regs_count = 88, \
        .hw_alive_check_addr = 0x10U

const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
        DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
        DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
        DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
        DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
        int err = 0;
        u32 val;

        hw_atl_glb_glb_reg_res_dis_set(self, 1U);
        hw_atl_pci_pci_reg_res_dis_set(self, 0U);
        hw_atl_rx_rx_reg_res_dis_set(self, 0U);
        hw_atl_tx_tx_reg_res_dis_set(self, 0U);

        HW_ATL_FLUSH();
        hw_atl_glb_soft_res_set(self, 1);

        /* check 10 times by 1ms */
        err = readx_poll_timeout_atomic(hw_atl_glb_soft_res_get,
                                        self, val, val == 0,
                                        1000U, 10000U);
        if (err < 0)
                goto err_exit;

        hw_atl_itr_irq_reg_res_dis_set(self, 0U);
        hw_atl_itr_res_irq_set(self, 1U);

        /* check 10 times by 1ms */
        err = readx_poll_timeout_atomic(hw_atl_itr_res_irq_get,
                                        self, val, val == 0,
                                        1000U, 10000U);
        if (err < 0)
                goto err_exit;

        self->aq_fw_ops->set_state(self, MPI_RESET);

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
        bool is_rx_flow_control = false;
        unsigned int i_priority = 0U;
        u32 buff_size = 0U;
        u32 tc = 0U;

        /* TPS Descriptor rate init */
        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
        hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

        /* TPS VM init */
        hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

        /* TPS TC credits init */
        hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
        hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

        hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0U, 0xFFF);
        hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0U, 0x64);
        hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0U, 0x50);
        hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0U, 0x1E);

        /* Tx buf size */
        buff_size = HW_ATL_A0_TXBUF_MAX;

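        /* Per-TC packet buffer size plus flow-control watermarks: the high/low
         * thresholds below are 66% / 50% of the buffer. buff_size is given in
         * kilobytes; the (1024 / 32) factor converts it into the 32-byte units
         * the threshold registers count in.
         */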
        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
                        (buff_size * (1024 / 32U) * 66U) / 100U, tc);
        hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
                        (buff_size * (1024 / 32U) * 50U) / 100U, tc);

        /* QoS Rx buf size per TC */
        tc = 0;
        is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->fc.req);
        buff_size = HW_ATL_A0_RXBUF_MAX;

        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
                        (buff_size * (1024U / 32U) * 66U) / 100U, tc);
        hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
                        (buff_size * (1024U / 32U) * 50U) / 100U, tc);
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U,
                        tc);

        /* QoS 802.1p priority -> TC mapping */
        for (i_priority = 8U; i_priority--;)
                hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
                struct aq_rss_parameters *rss_params)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        unsigned int addr = 0U;
        unsigned int i = 0U;
        int err = 0;
        u32 val;

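        /* Write the RSS hash secret key one 32-bit word at a time: set the
         * word and its address, pulse the write-enable bit and poll until the
         * hardware clears it. __swab32() byte-swaps each word into the order
         * the receive packet filter block expects.
         */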
        for (i = 10, addr = 0U; i--; ++addr) {
                u32 key_data = cfg->is_rss ?
                        __swab32(rss_params->hash_secret_key[i]) : 0U;
                hw_atl_rpf_rss_key_wr_data_set(self, key_data);
                hw_atl_rpf_rss_key_addr_set(self, addr);
                hw_atl_rpf_rss_key_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
                struct aq_rss_parameters *rss_params)
{
        u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
        u8 *indirection_table = rss_params->indirection_table;
        u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
                        HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
        int err = 0;
        u32 i = 0U;
        u32 val;

        memset(bitary, 0, sizeof(bitary));

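        /* Each redirection entry is HW_ATL_A0_RSS_REDIRECTION_BITS (3 bits)
         * wide, so pack the queue numbers into the bitary[] word array first,
         * then write it out through the indirection-table registers below.
         */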
        for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--;) {
                (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
                        ((indirection_table[i] % num_rss_queues) <<
                         ((i * 3U) & 0xFU));
        }

        for (i = ARRAY_SIZE(bitary); i--;) {
                hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
                hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
                hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
                struct aq_nic_cfg_s *aq_nic_cfg)
{
        /* TX checksums offloads */
        hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

        /* RX checksums offloads */
        hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);

        /* LSO offloads */
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
        hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

        /* Tx interrupts */
        hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00007040U, 0x00000000U);
        hw_atl_tdm_tx_dca_en_set(self, 0U);
        hw_atl_tdm_tx_dca_mode_set(self, 0U);

        hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int i;

        /* Rx TC/RSS number config */
        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

        /* Rx flow control */
        hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

        /* RSS Ring selection */
        hw_atl_reg_rx_flr_rss_control1set(self,
                        cfg->is_rss ? 0xB3333333U : 0x00000000U);

        /* Multicast filters */
        for (i = HW_ATL_A0_MAC_MAX; i--;) {
                hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
                hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
        }

        hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
        hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

        /* Vlan filters */
        hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
        hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
        hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

        /* Rx Interrupts */
        hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
        hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

        hw_atl_rdm_rx_dca_en_set(self, 0U);
        hw_atl_rdm_rx_dca_mode_set(self, 0U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
        unsigned int h = 0U;
        unsigned int l = 0U;
        int err = 0;

        if (!mac_addr) {
                err = -EINVAL;
                goto err_exit;
        }

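        /* The unicast filter registers take the address split into a 16-bit
         * MSW (bytes 0-1) and a 32-bit LSW (bytes 2-5).
         */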
        h = (mac_addr[0] << 8) | (mac_addr[1]);
        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
            (mac_addr[4] << 8) | mac_addr[5];

        hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
        hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
        hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
        hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
        static u32 aq_hw_atl_igcr_table_[4][2] = {
                [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
                [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
                [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
                [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
        };
        struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
        int err = 0;

        hw_atl_a0_hw_init_tx_path(self);
        hw_atl_a0_hw_init_rx_path(self);

        hw_atl_a0_hw_mac_addr_set(self, mac_addr);

        self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
        self->aq_fw_ops->set_state(self, MPI_INIT);

        hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
        hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);

        hw_atl_a0_hw_qos_set(self);
        hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

        /* Reset link status and read out initial hardware counters */
        self->aq_link_status.mbps = 0;
        self->aq_fw_ops->update_stats(self);

        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;

        /* Interrupts */
        hw_atl_reg_irq_glb_ctl_set(self,
                        aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
                                [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);

        hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

        /* Interrupts */
        hw_atl_reg_gen_irq_map_set(self,
                        ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
                        ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
                        ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
                        ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);

        hw_atl_a0_hw_offload_set(self, aq_nic_cfg);

err_exit:
        return err;
}

static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
                struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
                struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
        hw_atl_tpb_tx_buff_en_set(self, 1);
        hw_atl_rpb_rx_buff_en_set(self, 1);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
                struct aq_ring_s *ring)
{
        hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

        return 0;
}

static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
                struct aq_ring_s *ring,
                unsigned int frags)
{
        struct aq_ring_buff_s *buff = NULL;
        struct hw_atl_txd_s *txd = NULL;
        unsigned int buff_pa_len = 0U;
        unsigned int frag_count = 0U;
        unsigned int pkt_len = 0U;
        bool is_gso = false;

        buff = &ring->buff_ring[ring->sw_tail];
        pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

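        /* Fill one descriptor per fragment. For a TSO packet the first
         * descriptor is a context descriptor (TXC) carrying MSS and header
         * lengths; the following data descriptors (TXD) then reference it.
         */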
        for (frag_count = 0; frag_count < frags; frag_count++) {
                txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
                                HW_ATL_A0_TXD_SIZE];
                txd->ctl = 0;
                txd->ctl2 = 0;
                txd->buf_addr = 0;

                buff = &ring->buff_ring[ring->sw_tail];

                if (buff->is_gso_tcp) {
                        txd->ctl |= (buff->len_l3 << 31) |
                                        (buff->len_l2 << 24) |
                                        HW_ATL_A0_TXD_CTL_CMD_TCP |
                                        HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl2 |= (buff->mss << 16) |
                                        (buff->len_l4 << 8) |
                                        (buff->len_l3 >> 1);

                        pkt_len -= (buff->len_l4 +
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;

                        if (buff->is_ipv6)
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
                } else {
                        buff_pa_len = buff->len;

                        txd->buf_addr = buff->pa;
                        txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
                                     ((u32)buff_pa_len << 4));
                        txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
                        /* PAY_LEN */
                        txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);

                        if (is_gso) {
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
                                txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
                        }

                        /* Tx checksum offloads */
                        if (buff->is_ip_cso)
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;

                        if (buff->is_udp_cso || buff->is_tcp_cso)
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;

                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
                                is_gso = false;
                        }
                }

                ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
        }

        hw_atl_a0_hw_tx_ring_tail_update(self, ring);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
                struct aq_ring_s *aq_ring,
                struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
        u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;

        hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
                        aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addressmswset(self, dma_desc_addr_msw,
                        aq_ring->idx);

        hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_rdm_rx_desc_data_buff_size_set(self, aq_ring->frame_max / 1024U,
                        aq_ring->idx);

        hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
        hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

        /* Rx ring set mode */

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

        hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
                struct aq_ring_s *aq_ring,
                struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
        u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;

        hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
                        aq_ring->idx);

        hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
                        aq_ring->idx);

        hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);

        /* Set Tx threshold */
        hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

        hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
                struct aq_ring_s *ring,
                unsigned int sw_tail_old)
{
        for (; sw_tail_old != ring->sw_tail;
             sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
                                HW_ATL_A0_RXD_SIZE];

                struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

                rxd->buf_addr = buff->pa;
                rxd->hdr_addr = 0U;
        }

        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
                struct aq_ring_s *ring)
{
        unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self,
                        ring->idx);
        int err = 0;

        if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
                err = -ENXIO;
                goto err_exit;
        }
        ring->hw_head = hw_head;
        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
                struct aq_ring_s *ring)
{
        for (; ring->hw_head != ring->sw_tail;
             ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
                struct aq_ring_buff_s *buff = NULL;
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];

                unsigned int is_err = 1U;
                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;

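                /* Write-back not completed yet. What follows looks like an
                 * A0-specific workaround: if the DMA engine flags trouble the
                 * descriptor queue is reset, and if descriptor 0 appears stuck
                 * while descriptor 1 has already completed, a completion is
                 * synthesized for it; otherwise stop processing here.
                 */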
                if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
                        if ((1U << 4) &
                            hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
                                hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
                                hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
                                hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
                                hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
                        }

                        if (ring->hw_head ||
                            (hw_atl_rdm_rx_desc_head_ptr_get(self,
                                        ring->idx) < 2U)) {
                                break;
                        } else if (!(rxd_wb->status & 0x1U)) {
                                struct hw_atl_rxd_wb_s *rxd_wb1 =
                                        (struct hw_atl_rxd_wb_s *)
                                        (&ring->dx_ring[(1U) *
                                                HW_ATL_A0_RXD_SIZE]);

                                if ((rxd_wb1->status & 0x1U)) {
                                        rxd_wb->pkt_len = 1514U;
                                        rxd_wb->status = 3U;
                                } else {
                                        break;
                                }
                        }
                }

                buff = &ring->buff_ring[ring->hw_head];

                if (0x3U != (rxd_wb->status & 0x3U))
                        rxd_wb->status |= 4;

                is_err = (0x0000001CU & rxd_wb->status);
                is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
                pkt_type = 0xFFU & (rxd_wb->type >> 4);

                if (is_rx_check_sum_enabled) {
                        if (0x0U == (pkt_type & 0x3U))
                                buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;

                        if (0x4U == (pkt_type & 0x1CU))
                                buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;

                        /* Checksum offload workaround for small packets */
                        if (rxd_wb->pkt_len <= 60) {
                                buff->is_ip_cso = 0U;
                                buff->is_cso_err = 0U;
                        }
                }

                is_err &= ~0x18U;
                is_err &= ~0x04U;

                if (is_err || rxd_wb->type & 0x1000U) {
                        /* status error or DMA error */
                        buff->is_error = 1U;
                } else {
                        if (self->aq_nic_cfg->is_rss) {
                                /* low 4 bits carry the RSS hash type */
                                u16 rss_type = rxd_wb->type & 0xFU;

                                if (rss_type && rss_type < 0x8U) {
                                        buff->is_hash_l4 = (rss_type == 0x4 ||
                                                            rss_type == 0x5);
                                        buff->rss_hash = rxd_wb->rss_hash;
                                }
                        }

                        if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
                                buff->len = rxd_wb->pkt_len %
                                            ring->frame_max;
                                buff->len = buff->len ?
                                            buff->len : ring->frame_max;
                                buff->next = 0U;
                                buff->is_eop = 1U;
                        } else {
                                /* jumbo */
                                buff->next = aq_ring_next_dx(ring,
                                                ring->hw_head);
                                ++ring->stats.rx.jumbo_packets;
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
                        (1U << HW_ATL_A0_ERR_INT));

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
        hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

        if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
                atomic_inc(&self->dpc);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
        *mask = hw_atl_itr_irq_statuslsw_get(self);

        return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
                unsigned int packet_filter)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        unsigned int i = 0U;

        hw_atl_rpfl2promiscuous_mode_en_set(self,
                        IS_FILTER_ENABLED(IFF_PROMISC));
        hw_atl_rpfl2multicast_flr_en_set(self,
                        IS_FILTER_ENABLED(IFF_MULTICAST), 0);
        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

        cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);

        for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
                                (cfg->is_mc_list_enabled &&
                                 (i <= cfg->mc_list_count)) ? 1U : 0U,
                                i);

        return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
                u8 ar_mac
                [AQ_HW_MULTICAST_ADDRESS_MAX]
                [ETH_ALEN],
                u32 count)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int err = 0;

        if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
                err = -EBADRQC;
                goto err_exit;
        }
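        /* Each multicast address is programmed into one of the unicast filter
         * slots starting at HW_ATL_A0_MAC_MIN; a slot is disabled while its
         * address is rewritten and re-enabled only if the list is in use.
         */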
        for (cfg->mc_list_count = 0U;
             cfg->mc_list_count < count;
             ++cfg->mc_list_count) {
                u32 i = cfg->mc_list_count;
                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
                        (ar_mac[i][4] << 8) | ar_mac[i][5];

                hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
                                HW_ATL_A0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
                                HW_ATL_A0_MAC_MIN + i);

                hw_atl_rpfl2_uc_flr_en_set(self,
                                (cfg->is_mc_list_enabled),
                                HW_ATL_A0_MAC_MIN + i);
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
        unsigned int i = 0U;
        u32 itr_rx;

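        /* A non-zero itr enables Rx interrupt moderation: either the fixed
         * interval requested by the user, or, in AUTO mode, a timer picked
         * per link speed from hw_timers_tbl_[]. itr_rx == 0 leaves throttling
         * disabled.
         */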
        if (self->aq_nic_cfg->itr) {
                if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
                        u32 itr_ = (self->aq_nic_cfg->itr >> 1);

                        itr_ = min(AQ_CFG_IRQ_MASK, itr_);

                        itr_rx = 0x80000000U | (itr_ << 0x10);
                } else {
                        u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);

                        if (n < self->aq_link_status.mbps) {
                                itr_rx = 0U;
                        } else {
                                static unsigned int hw_timers_tbl_[] = {
                                        0x01CU, /* 10Gbit */
                                        0x039U, /* 5Gbit */
                                        0x039U, /* 5Gbit 5GS */
                                        0x073U, /* 2.5Gbit */
                                        0x120U, /* 1Gbit */
                                        0x1FFU, /* 100Mbit */
                                };

                                unsigned int speed_index =
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);

                                itr_rx = 0x80000000U |
                                         (hw_timers_tbl_[speed_index] << 0x10U);
                        }

                        aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
                        aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
                }
        } else {
                itr_rx = 0U;
        }

        for (i = HW_ATL_A0_RINGS_MAX; i--;)
                hw_atl_reg_irq_thr_set(self, itr_rx, i);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
        hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
                struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
                struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_fl3l4_clear(struct aq_hw_s *self,
                struct aq_rx_filter_l3l4 *data)
{
        u8 location = data->location;

        if (!data->is_ipv6) {
                hw_atl_rpfl3l4_cmd_clear(self, location);
                hw_atl_rpf_l4_spd_set(self, 0U, location);
                hw_atl_rpf_l4_dpd_set(self, 0U, location);
                hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
                hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
        } else {
                int i;

                for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
                        hw_atl_rpfl3l4_cmd_clear(self, location + i);
                        hw_atl_rpf_l4_spd_set(self, 0U, location + i);
                        hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
                }
                hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
                hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_fl3l4_set(struct aq_hw_s *self,
                struct aq_rx_filter_l3l4 *data)
{
        u8 location = data->location;

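        /* Wipe the filter slot first; the L3 addresses are programmed only
         * when an active command is requested, while the L4 ports and the
         * command word are always rewritten below.
         */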
        hw_atl_a0_hw_fl3l4_clear(self, data);

        if (data->cmd) {
                if (!data->is_ipv6) {
                        hw_atl_rpfl3l4_ipv4_dest_addr_set(self, location,
                                        data->ip_dst[0]);
                        hw_atl_rpfl3l4_ipv4_src_addr_set(self, location,
                                        data->ip_src[0]);
                } else {
                        hw_atl_rpfl3l4_ipv6_dest_addr_set(self, location,
                                        data->ip_dst);
                        hw_atl_rpfl3l4_ipv6_src_addr_set(self, location,
                                        data->ip_src);
                }
        }
        hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
        hw_atl_rpf_l4_spd_set(self, data->p_src, location);
        hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);

        return aq_hw_err_from_flags(self);
}

const struct aq_hw_ops hw_atl_ops_a0 = {
        .hw_soft_reset = hw_atl_utils_soft_reset,
        .hw_prepare = hw_atl_utils_initfw,
        .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
        .hw_init = hw_atl_a0_hw_init,
        .hw_reset = hw_atl_a0_hw_reset,
        .hw_start = hw_atl_a0_hw_start,
        .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start,
        .hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop,
        .hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start,
        .hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop,
        .hw_stop = hw_atl_a0_hw_stop,

        .hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit,
        .hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update,

        .hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive,
        .hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill,

        .hw_irq_enable = hw_atl_a0_hw_irq_enable,
        .hw_irq_disable = hw_atl_a0_hw_irq_disable,
        .hw_irq_read = hw_atl_a0_hw_irq_read,

        .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init,
        .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init,
        .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set,
        .hw_filter_l3l4_set = hw_atl_a0_hw_fl3l4_set,
        .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set,
        .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
        .hw_rss_set = hw_atl_a0_hw_rss_set,
        .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
        .hw_get_regs = hw_atl_utils_hw_get_regs,
        .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version = hw_atl_utils_get_fw_version,
};