/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
        (*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
        be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
        (*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
        be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
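
/*
 * Illustrative sketch (not part of the driver sources): a fill_stats-style
 * handler typically walks a descriptor array and pulls each counter out of
 * the backing stats structure through one of the accessors above.  The
 * names "sw", "sw_stats_desc" and "num" below are placeholders, not real
 * symbols from this header.
 *
 *      for (i = 0; i < num; i++)
 *              data[idx++] = MLX5E_READ_CTR64_CPU(&sw, sw_stats_desc, i);
 */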

#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)

#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)

#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)

struct counter_desc {
        char format[ETH_GSTRING_LEN];
        size_t offset; /* Byte offset */
};
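
/*
 * Illustrative sketch (not part of the driver sources): the MLX5E_DECLARE_*
 * helpers above expand to the "format, offset" initializer pair of a
 * counter_desc, so a descriptor table can be written as below.  The array
 * name is a placeholder; the referenced stats structures are the ones
 * defined later in this header.
 *
 *      static const struct counter_desc example_stats_desc[] = {
 *              { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
 *              { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
 *      };
 */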

enum {
        MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};

struct mlx5e_priv;
struct mlx5e_stats_grp {
        u16 update_stats_mask;
        int (*get_num_stats)(struct mlx5e_priv *priv);
        int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
        int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
        void (*update_stats)(struct mlx5e_priv *priv);
};

typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;

#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name

#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
        int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
        void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
        int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
        int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)

#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp

#define MLX5E_DECLARE_STATS_GRP(grp) \
        const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)

#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
MLX5E_DECLARE_STATS_GRP(grp) = { \
        .get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
        .fill_stats = MLX5E_STATS_GRP_OP(grp, fill_stats), \
        .fill_strings = MLX5E_STATS_GRP_OP(grp, fill_strings), \
        .update_stats = MLX5E_STATS_GRP_OP(grp, update_stats), \
        .update_stats_mask = mask, \
}
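
/*
 * Illustrative sketch (not part of the driver sources): a concrete stats
 * group is built from the helpers above - the OP-declaring macros provide
 * the handler prototypes/definitions and MLX5E_DEFINE_STATS_GRP ties them
 * together.  The "example" group name, the trivial handler bodies and the
 * zero mask below are placeholders for this sketch only.
 *
 *      static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(example) { return 0; }
 *      static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(example) { return idx; }
 *      static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(example) { return idx; }
 *      static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(example) { }
 *
 *      MLX5E_DEFINE_STATS_GRP(example, 0);
 */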

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
void mlx5e_stats_update(struct mlx5e_priv *priv);
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
                           struct ethtool_pause_stats *pause_stats);
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
                         struct ethtool_fec_stats *fec_stats);

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
                             struct ethtool_eth_phy_stats *phy_stats);
void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
                             struct ethtool_eth_mac_stats *mac_stats);
void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
                              struct ethtool_eth_ctrl_stats *ctrl_stats);
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
                          struct ethtool_rmon_stats *rmon,
                          const struct ethtool_rmon_hist_range **ranges);
void mlx5e_get_link_ext_stats(struct net_device *dev,
                              struct ethtool_link_ext_stats *stats);
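
/*
 * Illustrative sketch (not part of the driver sources): these getters fill
 * the standard ethtool stats structures, so they lend themselves to being
 * called from ethtool_ops-style callbacks.  The callback name and the use
 * of netdev_priv() to recover the mlx5e_priv are assumptions made for the
 * example only.
 *
 *      static void example_get_pause_stats(struct net_device *dev,
 *                                          struct ethtool_pause_stats *stats)
 *      {
 *              struct mlx5e_priv *priv = netdev_priv(dev);
 *
 *              mlx5e_stats_pause_get(priv, stats);
 *      }
 */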

/* Concrete NIC Stats */

struct mlx5e_sw_stats {
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
        u64 tx_tso_packets;
        u64 tx_tso_bytes;
        u64 tx_tso_inner_packets;
        u64 tx_tso_inner_bytes;
        u64 tx_added_vlan_packets;
        u64 tx_nop;
        u64 tx_mpwqe_blks;
        u64 tx_mpwqe_pkts;
        u64 rx_lro_packets;
        u64 rx_lro_bytes;
        u64 rx_gro_packets;
        u64 rx_gro_bytes;
        u64 rx_gro_skbs;
        u64 rx_gro_match_packets;
        u64 rx_gro_large_hds;
        u64 rx_mcast_packets;
        u64 rx_ecn_mark;
        u64 rx_removed_vlan_packets;
        u64 rx_csum_unnecessary;
        u64 rx_csum_none;
        u64 rx_csum_complete;
        u64 rx_csum_complete_tail;
        u64 rx_csum_complete_tail_slow;
        u64 rx_csum_unnecessary_inner;
        u64 rx_xdp_drop;
        u64 rx_xdp_redirect;
        u64 rx_xdp_tx_xmit;
        u64 rx_xdp_tx_mpwqe;
        u64 rx_xdp_tx_inlnw;
        u64 rx_xdp_tx_nops;
        u64 rx_xdp_tx_full;
        u64 rx_xdp_tx_err;
        u64 rx_xdp_tx_cqe;
        u64 tx_csum_none;
        u64 tx_csum_partial;
        u64 tx_csum_partial_inner;
        u64 tx_queue_stopped;
        u64 tx_queue_dropped;
        u64 tx_xmit_more;
        u64 tx_recover;
        u64 tx_cqes;
        u64 tx_queue_wake;
        u64 tx_cqe_err;
        u64 tx_xdp_xmit;
        u64 tx_xdp_mpwqe;
        u64 tx_xdp_inlnw;
        u64 tx_xdp_nops;
        u64 tx_xdp_full;
        u64 tx_xdp_err;
        u64 tx_xdp_cqes;
        u64 rx_wqe_err;
        u64 rx_mpwqe_filler_cqes;
        u64 rx_mpwqe_filler_strides;
        u64 rx_oversize_pkts_sw_drop;
        u64 rx_buff_alloc_err;
        u64 rx_cqe_compress_blks;
        u64 rx_cqe_compress_pkts;
        u64 rx_congst_umr;
#ifdef CONFIG_MLX5_EN_ARFS
        u64 rx_arfs_add;
        u64 rx_arfs_request_in;
        u64 rx_arfs_request_out;
        u64 rx_arfs_expired;
        u64 rx_arfs_err;
#endif
        u64 rx_recover;
        u64 ch_events;
        u64 ch_poll;
        u64 ch_arm;
        u64 ch_aff_change;
        u64 ch_force_irq;
        u64 ch_eq_rearm;
#ifdef CONFIG_PAGE_POOL_STATS
        u64 rx_pp_alloc_fast;
        u64 rx_pp_alloc_slow;
        u64 rx_pp_alloc_slow_high_order;
        u64 rx_pp_alloc_empty;
        u64 rx_pp_alloc_refill;
        u64 rx_pp_alloc_waive;
        u64 rx_pp_recycle_cached;
        u64 rx_pp_recycle_cache_full;
        u64 rx_pp_recycle_ring;
        u64 rx_pp_recycle_ring_full;
        u64 rx_pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
        u64 tx_tls_encrypted_packets;
        u64 tx_tls_encrypted_bytes;
        u64 tx_tls_ooo;
        u64 tx_tls_dump_packets;
        u64 tx_tls_dump_bytes;
        u64 tx_tls_resync_bytes;
        u64 tx_tls_skip_no_sync_data;
        u64 tx_tls_drop_no_sync_data;
        u64 tx_tls_drop_bypass_req;

        u64 rx_tls_decrypted_packets;
        u64 rx_tls_decrypted_bytes;
        u64 rx_tls_resync_req_pkt;
        u64 rx_tls_resync_req_start;
        u64 rx_tls_resync_req_end;
        u64 rx_tls_resync_req_skip;
        u64 rx_tls_resync_res_ok;
        u64 rx_tls_resync_res_retry;
        u64 rx_tls_resync_res_skip;
        u64 rx_tls_err;
#endif

        u64 rx_xsk_packets;
        u64 rx_xsk_bytes;
        u64 rx_xsk_csum_complete;
        u64 rx_xsk_csum_unnecessary;
        u64 rx_xsk_csum_unnecessary_inner;
        u64 rx_xsk_csum_none;
        u64 rx_xsk_ecn_mark;
        u64 rx_xsk_removed_vlan_packets;
        u64 rx_xsk_xdp_drop;
        u64 rx_xsk_xdp_redirect;
        u64 rx_xsk_wqe_err;
        u64 rx_xsk_mpwqe_filler_cqes;
        u64 rx_xsk_mpwqe_filler_strides;
        u64 rx_xsk_oversize_pkts_sw_drop;
        u64 rx_xsk_buff_alloc_err;
        u64 rx_xsk_cqe_compress_blks;
        u64 rx_xsk_cqe_compress_pkts;
        u64 rx_xsk_congst_umr;
        u64 tx_xsk_xmit;
        u64 tx_xsk_mpwqe;
        u64 tx_xsk_inlnw;
        u64 tx_xsk_full;
        u64 tx_xsk_err;
        u64 tx_xsk_cqes;
};

struct mlx5e_qcounter_stats {
        u32 rx_out_of_buffer;
        u32 rx_if_down_packets;
};

#define VNIC_ENV_GET(vnic_env_stats, c) \
        MLX5_GET(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \
                 vport_env.c)

struct mlx5e_vnic_env_stats {
        __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};

#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
                                                vstats->query_vport_out, c)

struct mlx5e_vport_stats {
        __be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
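
/*
 * Illustrative sketch (not part of the driver sources): both helpers above
 * decode fields straight out of the raw firmware query output held in the
 * stats structures, so the caller passes a pointer to an already populated
 * struct plus the field name from the firmware layout.  The field names
 * below are examples of such firmware counters, used only to show the
 * calling convention.
 *
 *      u32 err_queues = VNIC_ENV_GET(vnic_env, total_error_queues);
 *      u64 rx_bcast = VPORT_COUNTER_GET(vport, received_eth_broadcast.packets);
 */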

#define PPORT_802_3_GET(pstats, c) \
        MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
                   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
        MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
                   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
        MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
                   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
        MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
                   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
        MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
                   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO 8
#define PPORT_ETH_EXT_GET(pstats, c) \
        MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
                   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)

struct mlx5e_pport_stats {
        __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
};
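
/*
 * Illustrative sketch (not part of the driver sources): the PPORT helpers
 * read 64-bit PPCNT counters by pasting "_high" onto the field name, so the
 * caller passes the bare counter name from the matching group layout.  The
 * field names below are examples of such PPCNT counters; "pport" stands for
 * a struct mlx5e_pport_stats already filled from PPCNT queries.
 *
 *      u64 tx_ok = PPORT_802_3_GET(&pport, a_frames_transmitted_ok);
 *      u64 pause0 = PPORT_PER_PRIO_GET(&pport, 0, rx_pause);
 */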

#define PCIE_PERF_GET(pcie_stats, c) \
        MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
                 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_GET64(pcie_stats, c) \
        MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
                   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)

struct mlx5e_pcie_stats {
        __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};

struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
        u64 csum_complete;
        u64 csum_complete_tail;
        u64 csum_complete_tail_slow;
        u64 csum_unnecessary;
        u64 csum_unnecessary_inner;
        u64 csum_none;
        u64 lro_packets;
        u64 lro_bytes;
        u64 gro_packets;
        u64 gro_bytes;
        u64 gro_skbs;
        u64 gro_match_packets;
        u64 gro_large_hds;
        u64 mcast_packets;
        u64 ecn_mark;
        u64 removed_vlan_packets;
        u64 xdp_drop;
        u64 xdp_redirect;
        u64 wqe_err;
        u64 mpwqe_filler_cqes;
        u64 mpwqe_filler_strides;
        u64 oversize_pkts_sw_drop;
        u64 buff_alloc_err;
        u64 cqe_compress_blks;
        u64 cqe_compress_pkts;
        u64 congst_umr;
#ifdef CONFIG_MLX5_EN_ARFS
        u64 arfs_add;
        u64 arfs_request_in;
        u64 arfs_request_out;
        u64 arfs_expired;
        u64 arfs_err;
#endif
        u64 recover;
#ifdef CONFIG_PAGE_POOL_STATS
        u64 pp_alloc_fast;
        u64 pp_alloc_slow;
        u64 pp_alloc_slow_high_order;
        u64 pp_alloc_empty;
        u64 pp_alloc_refill;
        u64 pp_alloc_waive;
        u64 pp_recycle_cached;
        u64 pp_recycle_cache_full;
        u64 pp_recycle_ring;
        u64 pp_recycle_ring_full;
        u64 pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
        u64 tls_decrypted_packets;
        u64 tls_decrypted_bytes;
        u64 tls_resync_req_pkt;
        u64 tls_resync_req_start;
        u64 tls_resync_req_end;
        u64 tls_resync_req_skip;
        u64 tls_resync_res_ok;
        u64 tls_resync_res_retry;
        u64 tls_resync_res_skip;
        u64 tls_err;
#endif
};

struct mlx5e_sq_stats {
        /* commonly accessed in data path */
        u64 packets;
        u64 bytes;
        u64 xmit_more;
        u64 tso_packets;
        u64 tso_bytes;
        u64 tso_inner_packets;
        u64 tso_inner_bytes;
        u64 csum_partial;
        u64 csum_partial_inner;
        u64 added_vlan_packets;
        u64 nop;
        u64 mpwqe_blks;
        u64 mpwqe_pkts;
#ifdef CONFIG_MLX5_EN_TLS
        u64 tls_encrypted_packets;
        u64 tls_encrypted_bytes;
        u64 tls_ooo;
        u64 tls_dump_packets;
        u64 tls_dump_bytes;
        u64 tls_resync_bytes;
        u64 tls_skip_no_sync_data;
        u64 tls_drop_no_sync_data;
        u64 tls_drop_bypass_req;
#endif
        /* less likely accessed in data path */
        u64 csum_none;
        u64 stopped;
        u64 dropped;
        u64 recover;
        /* dirtied @completion */
        u64 cqes ____cacheline_aligned_in_smp;
        u64 wake;
        u64 cqe_err;
};

struct mlx5e_xdpsq_stats {
        u64 xmit;
        u64 mpwqe;
        u64 inlnw;
        u64 nops;
        u64 full;
        u64 err;
        /* dirtied @completion */
        u64 cqes ____cacheline_aligned_in_smp;
};

struct mlx5e_ch_stats {
        u64 events;
        u64 poll;
        u64 arm;
        u64 aff_change;
        u64 force_irq;
        u64 eq_rearm;
};

struct mlx5e_ptp_cq_stats {
        u64 cqe;
        u64 err_cqe;
        u64 abort;
        u64 abort_abs_diff_ns;
        u64 late_cqe;
};

struct mlx5e_rep_stats {
        u64 vport_rx_packets;
        u64 vport_tx_packets;
        u64 vport_rx_bytes;
        u64 vport_tx_bytes;
        u64 rx_vport_rdma_unicast_packets;
        u64 tx_vport_rdma_unicast_packets;
        u64 rx_vport_rdma_unicast_bytes;
        u64 tx_vport_rdma_unicast_bytes;
        u64 rx_vport_rdma_multicast_packets;
        u64 tx_vport_rdma_multicast_packets;
        u64 rx_vport_rdma_multicast_bytes;
        u64 tx_vport_rdma_multicast_bytes;
        u64 vport_loopback_packets;
        u64 vport_loopback_bytes;
};

struct mlx5e_stats {
        struct mlx5e_sw_stats sw;
        struct mlx5e_qcounter_stats qcnt;
        struct mlx5e_vnic_env_stats vnic;
        struct mlx5e_vport_stats vport;
        struct mlx5e_pport_stats pport;
        struct mlx5e_pcie_stats pcie;
        struct mlx5e_rep_stats rep_stats;
};

static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport,
                                              struct mlx5e_rep_stats *rep_stats)
{
        memset(vf_vport, 0, sizeof(*vf_vport));
        vf_vport->rx_packets = rep_stats->vport_rx_packets;
        vf_vport->tx_packets = rep_stats->vport_tx_packets;
        vf_vport->rx_bytes = rep_stats->vport_rx_bytes;
        vf_vport->tx_bytes = rep_stats->vport_tx_bytes;
}

extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);

extern MLX5E_DECLARE_STATS_GRP(sw);
extern MLX5E_DECLARE_STATS_GRP(qcnt);
extern MLX5E_DECLARE_STATS_GRP(vnic_env);
extern MLX5E_DECLARE_STATS_GRP(vport);
extern MLX5E_DECLARE_STATS_GRP(802_3);
extern MLX5E_DECLARE_STATS_GRP(2863);
extern MLX5E_DECLARE_STATS_GRP(2819);
extern MLX5E_DECLARE_STATS_GRP(phy);
extern MLX5E_DECLARE_STATS_GRP(eth_ext);
extern MLX5E_DECLARE_STATS_GRP(pcie);
extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
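
/*
 * Illustrative sketch (not part of the driver sources): mlx5e_nic_stats_grps[]
 * is an array of pointers to const group objects such as those declared
 * above; a minimal definition could look like the following, with the real
 * list and update masks living in the stats implementation file.
 *
 *      mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
 *              &MLX5E_STATS_GRP(sw),
 *              &MLX5E_STATS_GRP(qcnt),
 *      };
 */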

#endif /* __MLX5_EN_STATS_H__ */