1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
2/*
3 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
#include <linux/string.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/bitmap.h>
14
15#include "dpaa_eth.h"
16#include "mac.h"
17
18static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
19 "interrupts",
20 "rx packets",
21 "tx packets",
22 "tx confirm",
23 "tx S/G",
24 "tx error",
25 "rx error",
26 "rx dropped",
27 "tx dropped",
28};
29
30static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
31 /* dpa rx errors */
32 "rx dma error",
33 "rx frame physical error",
34 "rx frame size error",
35 "rx header error",
36
37 /* demultiplexing errors */
38 "qman cg_tdrop",
39 "qman wred",
40 "qman error cond",
41 "qman early window",
42 "qman late window",
43 "qman fq tdrop",
44 "qman fq retired",
45 "qman orp disabled",
46
47 /* congestion related stats */
48 "congestion time (ms)",
49 "entered congestion",
50 "congested (0/1)"
51};
52
53#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
54#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
55
56static int dpaa_get_link_ksettings(struct net_device *net_dev,
57 struct ethtool_link_ksettings *cmd)
58{
59 struct dpaa_priv *priv = netdev_priv(dev: net_dev);
60 struct mac_device *mac_dev = priv->mac_dev;
61
62 return phylink_ethtool_ksettings_get(mac_dev->phylink, cmd);
63}
64
65static int dpaa_set_link_ksettings(struct net_device *net_dev,
66 const struct ethtool_link_ksettings *cmd)
67{
68 struct dpaa_priv *priv = netdev_priv(dev: net_dev);
69 struct mac_device *mac_dev = priv->mac_dev;
70
71 return phylink_ethtool_ksettings_set(mac_dev->phylink, cmd);
72}
73
74static void dpaa_get_drvinfo(struct net_device *net_dev,
75 struct ethtool_drvinfo *drvinfo)
76{
77 strscpy(drvinfo->driver, KBUILD_MODNAME,
78 sizeof(drvinfo->driver));
79 strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
80 sizeof(drvinfo->bus_info));
81}
82
83static u32 dpaa_get_msglevel(struct net_device *net_dev)
84{
85 return ((struct dpaa_priv *)netdev_priv(dev: net_dev))->msg_enable;
86}
87
88static void dpaa_set_msglevel(struct net_device *net_dev,
89 u32 msg_enable)
90{
91 ((struct dpaa_priv *)netdev_priv(dev: net_dev))->msg_enable = msg_enable;
92}
93
94static int dpaa_nway_reset(struct net_device *net_dev)
95{
96 struct dpaa_priv *priv = netdev_priv(dev: net_dev);
97 struct mac_device *mac_dev = priv->mac_dev;
98
99 return phylink_ethtool_nway_reset(mac_dev->phylink);
100}
101
102static void dpaa_get_pauseparam(struct net_device *net_dev,
103 struct ethtool_pauseparam *epause)
104{
105 struct dpaa_priv *priv = netdev_priv(dev: net_dev);
106 struct mac_device *mac_dev = priv->mac_dev;
107
108 phylink_ethtool_get_pauseparam(mac_dev->phylink, epause);
109}
110
111static int dpaa_set_pauseparam(struct net_device *net_dev,
112 struct ethtool_pauseparam *epause)
113{
114 struct dpaa_priv *priv = netdev_priv(dev: net_dev);
115 struct mac_device *mac_dev = priv->mac_dev;
116
117 return phylink_ethtool_set_pauseparam(mac_dev->phylink, epause);
118}
119
120static int dpaa_get_sset_count(struct net_device *net_dev, int type)
121{
122 unsigned int total_stats, num_stats;
123
124 num_stats = num_online_cpus() + 1;
125 total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
126 DPAA_STATS_GLOBAL_LEN;
127
128 switch (type) {
129 case ETH_SS_STATS:
130 return total_stats;
131 default:
132 return -EOPNOTSUPP;
133 }
134}
135
136static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
137 int crr_cpu, u64 bp_count, u64 *data)
138{
139 int num_values = num_cpus + 1;
140 int crr = 0;
141
142 /* update current CPU's stats and also add them to the total values */
143 data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
144 data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
145
146 data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
147 data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
148
149 data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
150 data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
151
152 data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
153 data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
154
155 data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
156 data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
157
158 data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
159 data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
160
161 data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
162 data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
163
164 data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
165 data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
166
167 data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
168 data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
169
170 data[crr * num_values + crr_cpu] = bp_count;
171 data[crr++ * num_values + num_cpus] += bp_count;
172}
173
174static void dpaa_get_ethtool_stats(struct net_device *net_dev,
175 struct ethtool_stats *stats, u64 *data)
176{
177 struct dpaa_percpu_priv *percpu_priv;
178 struct dpaa_rx_errors rx_errors;
179 unsigned int num_cpus, offset;
180 u64 bp_count, cg_time, cg_num;
181 struct dpaa_ern_cnt ern_cnt;
182 struct dpaa_bp *dpaa_bp;
183 struct dpaa_priv *priv;
184 int total_stats, i;
185 bool cg_status;
186
187 total_stats = dpaa_get_sset_count(net_dev, type: ETH_SS_STATS);
188 priv = netdev_priv(dev: net_dev);
189 num_cpus = num_online_cpus();
190
191 memset(&bp_count, 0, sizeof(bp_count));
192 memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
193 memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
194 memset(data, 0, total_stats * sizeof(u64));
195
196 for_each_online_cpu(i) {
197 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
198 dpaa_bp = priv->dpaa_bp;
199 if (!dpaa_bp->percpu_count)
200 continue;
201 bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
202 rx_errors.dme += percpu_priv->rx_errors.dme;
203 rx_errors.fpe += percpu_priv->rx_errors.fpe;
204 rx_errors.fse += percpu_priv->rx_errors.fse;
205 rx_errors.phe += percpu_priv->rx_errors.phe;
206
207 ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
208 ern_cnt.wred += percpu_priv->ern_cnt.wred;
209 ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
210 ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
211 ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
212 ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
213 ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
214 ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
215
216 copy_stats(percpu_priv, num_cpus, crr_cpu: i, bp_count, data);
217 }
218
219 offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
220 memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
221
222 offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
223 memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));
224
225 /* gather congestion related counters */
226 cg_num = 0;
227 cg_status = false;
228 cg_time = jiffies_to_msecs(j: priv->cgr_data.congested_jiffies);
229 if (qman_query_cgr_congested(cgr: &priv->cgr_data.cgr, result: &cg_status) == 0) {
230 cg_num = priv->cgr_data.cgr_congested_count;
231
232 /* reset congestion stats (like QMan API does */
233 priv->cgr_data.congested_jiffies = 0;
234 priv->cgr_data.cgr_congested_count = 0;
235 }
236
237 offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
238 data[offset++] = cg_time;
239 data[offset++] = cg_num;
240 data[offset++] = cg_status;
241}
242
243static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
244 u8 *data)
245{
246 unsigned int i, j, num_cpus, size;
247 char string_cpu[ETH_GSTRING_LEN];
248 u8 *strings;
249
250 memset(string_cpu, 0, sizeof(string_cpu));
251 strings = data;
252 num_cpus = num_online_cpus();
253 size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
254
255 for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
256 for (j = 0; j < num_cpus; j++) {
257 snprintf(buf: string_cpu, size: ETH_GSTRING_LEN, fmt: "%s [CPU %d]",
258 dpaa_stats_percpu[i], j);
259 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
260 strings += ETH_GSTRING_LEN;
261 }
262 snprintf(buf: string_cpu, size: ETH_GSTRING_LEN, fmt: "%s [TOTAL]",
263 dpaa_stats_percpu[i]);
264 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
265 strings += ETH_GSTRING_LEN;
266 }
267 for (j = 0; j < num_cpus; j++) {
268 snprintf(buf: string_cpu, size: ETH_GSTRING_LEN,
269 fmt: "bpool [CPU %d]", j);
270 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
271 strings += ETH_GSTRING_LEN;
272 }
273 snprintf(buf: string_cpu, size: ETH_GSTRING_LEN, fmt: "bpool [TOTAL]");
274 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
275 strings += ETH_GSTRING_LEN;
276
277 memcpy(strings, dpaa_stats_global, size);
278}
279
280static int dpaa_get_hash_opts(struct net_device *dev,
281 struct ethtool_rxnfc *cmd)
282{
283 struct dpaa_priv *priv = netdev_priv(dev);
284
285 cmd->data = 0;
286
287 switch (cmd->flow_type) {
288 case TCP_V4_FLOW:
289 case TCP_V6_FLOW:
290 case UDP_V4_FLOW:
291 case UDP_V6_FLOW:
292 if (priv->keygen_in_use)
293 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
294 fallthrough;
295 case IPV4_FLOW:
296 case IPV6_FLOW:
297 case SCTP_V4_FLOW:
298 case SCTP_V6_FLOW:
299 case AH_ESP_V4_FLOW:
300 case AH_ESP_V6_FLOW:
301 case AH_V4_FLOW:
302 case AH_V6_FLOW:
303 case ESP_V4_FLOW:
304 case ESP_V6_FLOW:
305 if (priv->keygen_in_use)
306 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
307 break;
308 default:
309 cmd->data = 0;
310 break;
311 }
312
313 return 0;
314}
315
316static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
317 u32 *unused)
318{
319 int ret = -EOPNOTSUPP;
320
321 switch (cmd->cmd) {
322 case ETHTOOL_GRXFH:
323 ret = dpaa_get_hash_opts(dev, cmd);
324 break;
325 default:
326 break;
327 }
328
329 return ret;
330}
331
332static void dpaa_set_hash(struct net_device *net_dev, bool enable)
333{
334 struct mac_device *mac_dev;
335 struct fman_port *rxport;
336 struct dpaa_priv *priv;
337
338 priv = netdev_priv(dev: net_dev);
339 mac_dev = priv->mac_dev;
340 rxport = mac_dev->port[0];
341
342 fman_port_use_kg_hash(rxport, enable);
343 priv->keygen_in_use = enable;
344}
345
346static int dpaa_set_hash_opts(struct net_device *dev,
347 struct ethtool_rxnfc *nfc)
348{
349 int ret = -EINVAL;
350
351 /* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
352 if (nfc->data &
353 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
354 return -EINVAL;
355
356 switch (nfc->flow_type) {
357 case TCP_V4_FLOW:
358 case TCP_V6_FLOW:
359 case UDP_V4_FLOW:
360 case UDP_V6_FLOW:
361 case IPV4_FLOW:
362 case IPV6_FLOW:
363 case SCTP_V4_FLOW:
364 case SCTP_V6_FLOW:
365 case AH_ESP_V4_FLOW:
366 case AH_ESP_V6_FLOW:
367 case AH_V4_FLOW:
368 case AH_V6_FLOW:
369 case ESP_V4_FLOW:
370 case ESP_V6_FLOW:
371 dpaa_set_hash(net_dev: dev, enable: !!nfc->data);
372 ret = 0;
373 break;
374 default:
375 break;
376 }
377
378 return ret;
379}
380
381static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
382{
383 int ret = -EOPNOTSUPP;
384
385 switch (cmd->cmd) {
386 case ETHTOOL_SRXFH:
387 ret = dpaa_set_hash_opts(dev, nfc: cmd);
388 break;
389 default:
390 break;
391 }
392
393 return ret;
394}
395
396static int dpaa_get_ts_info(struct net_device *net_dev,
397 struct ethtool_ts_info *info)
398{
399 struct device *dev = net_dev->dev.parent;
400 struct device_node *mac_node = dev->of_node;
401 struct device_node *fman_node = NULL, *ptp_node = NULL;
402 struct platform_device *ptp_dev = NULL;
403 struct ptp_qoriq *ptp = NULL;
404
405 info->phc_index = -1;
406
407 fman_node = of_get_parent(node: mac_node);
408 if (fman_node) {
409 ptp_node = of_parse_phandle(np: fman_node, phandle_name: "ptimer-handle", index: 0);
410 of_node_put(node: fman_node);
411 }
412
413 if (ptp_node) {
414 ptp_dev = of_find_device_by_node(np: ptp_node);
415 of_node_put(node: ptp_node);
416 }
417
418 if (ptp_dev)
419 ptp = platform_get_drvdata(pdev: ptp_dev);
420
421 if (ptp)
422 info->phc_index = ptp->phc_index;
423
424 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
425 SOF_TIMESTAMPING_RX_HARDWARE |
426 SOF_TIMESTAMPING_RAW_HARDWARE;
427 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
428 (1 << HWTSTAMP_TX_ON);
429 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
430 (1 << HWTSTAMP_FILTER_ALL);
431
432 return 0;
433}
434
435static int dpaa_get_coalesce(struct net_device *dev,
436 struct ethtool_coalesce *c,
437 struct kernel_ethtool_coalesce *kernel_coal,
438 struct netlink_ext_ack *extack)
439{
440 struct qman_portal *portal;
441 u32 period;
442 u8 thresh;
443
444 portal = qman_get_affine_portal(smp_processor_id());
445 qman_portal_get_iperiod(portal, iperiod: &period);
446 qman_dqrr_get_ithresh(portal, ithresh: &thresh);
447
448 c->rx_coalesce_usecs = period;
449 c->rx_max_coalesced_frames = thresh;
450
451 return 0;
452}
453
454static int dpaa_set_coalesce(struct net_device *dev,
455 struct ethtool_coalesce *c,
456 struct kernel_ethtool_coalesce *kernel_coal,
457 struct netlink_ext_ack *extack)
458{
459 const cpumask_t *cpus = qman_affine_cpus();
460 bool needs_revert[NR_CPUS] = {false};
461 struct qman_portal *portal;
462 u32 period, prev_period;
463 u8 thresh, prev_thresh;
464 int cpu, res;
465
466 period = c->rx_coalesce_usecs;
467 thresh = c->rx_max_coalesced_frames;
468
469 /* save previous values */
470 portal = qman_get_affine_portal(smp_processor_id());
471 qman_portal_get_iperiod(portal, iperiod: &prev_period);
472 qman_dqrr_get_ithresh(portal, ithresh: &prev_thresh);
473
474 /* set new values */
475 for_each_cpu_and(cpu, cpus, cpu_online_mask) {
476 portal = qman_get_affine_portal(cpu);
477 res = qman_portal_set_iperiod(portal, iperiod: period);
478 if (res)
479 goto revert_values;
480 res = qman_dqrr_set_ithresh(portal, ithresh: thresh);
481 if (res) {
482 qman_portal_set_iperiod(portal, iperiod: prev_period);
483 goto revert_values;
484 }
485 needs_revert[cpu] = true;
486 }
487
488 return 0;
489
490revert_values:
491 /* restore previous values */
492 for_each_cpu_and(cpu, cpus, cpu_online_mask) {
493 if (!needs_revert[cpu])
494 continue;
495 portal = qman_get_affine_portal(cpu);
496 /* previous values will not fail, ignore return value */
497 qman_portal_set_iperiod(portal, iperiod: prev_period);
498 qman_dqrr_set_ithresh(portal, ithresh: prev_thresh);
499 }
500
501 return res;
502}
503
504const struct ethtool_ops dpaa_ethtool_ops = {
505 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
506 ETHTOOL_COALESCE_RX_MAX_FRAMES,
507 .get_drvinfo = dpaa_get_drvinfo,
508 .get_msglevel = dpaa_get_msglevel,
509 .set_msglevel = dpaa_set_msglevel,
510 .nway_reset = dpaa_nway_reset,
511 .get_pauseparam = dpaa_get_pauseparam,
512 .set_pauseparam = dpaa_set_pauseparam,
513 .get_link = ethtool_op_get_link,
514 .get_sset_count = dpaa_get_sset_count,
515 .get_ethtool_stats = dpaa_get_ethtool_stats,
516 .get_strings = dpaa_get_strings,
517 .get_link_ksettings = dpaa_get_link_ksettings,
518 .set_link_ksettings = dpaa_set_link_ksettings,
519 .get_rxnfc = dpaa_get_rxnfc,
520 .set_rxnfc = dpaa_set_rxnfc,
521 .get_ts_info = dpaa_get_ts_info,
522 .get_coalesce = dpaa_get_coalesce,
523 .set_coalesce = dpaa_set_coalesce,
524};
525

/* drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c */