// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"

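/* The only flower firmware ABI version this driver accepts; compared against
 * the "hw_flower_version" rtsym in nfp_flower_init().
 */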
#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

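/* Range of IDs handed out for internal (non-repr) ports. idr_alloc() treats
 * the upper bound as exclusive, so valid IDs are 1..255.
 */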
#define NFP_MIN_INT_PORT_ID	1
#define NFP_MAX_INT_PORT_ID	256

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

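/* Walk the internal-port IDR under RCU and return the ID mapped to @netdev,
 * or 0 if the netdev has no internal port ID assigned.
 */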
static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
				   struct net_device *netdev)
{
	struct net_device *entry;
	int i, id = 0;

	rcu_read_lock();
	idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
		if (entry == netdev) {
			id = i;
			break;
		}
	rcu_read_unlock();

	return id;
}

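/* Return the existing internal port ID for @netdev or, failing that, allocate
 * a new one. The IDR update uses GFP_ATOMIC and a BH-disabling lock, so it is
 * safe from atomic context.
 */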
static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (id > 0)
		return id;

	idr_preload(GFP_ATOMIC);
	spin_lock_bh(&priv->internal_ports.lock);
	id = idr_alloc(&priv->internal_ports.port_ids, netdev,
		       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
	spin_unlock_bh(&priv->internal_ports.lock);
	idr_preload_end();

	return id;
}

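/* Translate a netdev into the port ID used in control messages: reprs carry
 * their own ID, offloadable internal ports get an internal port ID, and LAG
 * masters are encoded as a LAG output group when the firmware supports
 * tunnel neighbour LAG. Returns 0 if no ID can be derived.
 */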
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int ext_port;
	int gid;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		return nfp_repr_get_port_id(netdev);
	} else if (nfp_flower_internal_port_can_offload(app, netdev)) {
		ext_port = nfp_flower_get_internal_port_id(app, netdev);
		if (ext_port < 0)
			return 0;

		return nfp_flower_internal_port_get_port_id(ext_port);
	} else if (netif_is_lag_master(netdev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) {
		gid = nfp_flower_lag_get_output_id(app, netdev);
		if (gid < 0)
			return 0;

		return (NFP_FL_LAG_OUT | gid);
	}

	return 0;
}

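/* Reverse mapping: look up the netdev registered for an internal port ID.
 * The IDR is read under RCU; returns NULL if the ID is not in use.
 */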
static struct net_device *
nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *netdev;

	rcu_read_lock();
	netdev = idr_find(&priv->internal_ports.port_ids, port_id);
	rcu_read_unlock();

	return netdev;
}

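/* Release the internal port ID assigned to @netdev, if any. Called on
 * NETDEV_UNREGISTER so stale netdev pointers never linger in the IDR.
 */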
static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (!id)
		return;

	spin_lock_bh(&priv->internal_ports.lock);
	idr_remove(&priv->internal_ports.port_ids, id);
	spin_unlock_bh(&priv->internal_ports.lock);
}

static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	if (event == NETDEV_UNREGISTER &&
	    nfp_flower_internal_port_can_offload(app, netdev))
		nfp_flower_free_internal_port_id(app, netdev);

	return NOTIFY_OK;
}

static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
	spin_lock_init(&priv->internal_ports.lock);
	idr_init(&priv->internal_ports.port_ids);
}

static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
	idr_destroy(&priv->internal_ports.port_ids);
}

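/* Find the per-netdev state tracked for non-repr devices. The list is only
 * walked or modified under RTNL, hence the assertion.
 */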
static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	ASSERT_RTNL();

	list_for_each_entry(entry, &priv->non_repr_priv, list)
		if (entry->netdev == netdev)
			return entry;

	return NULL;
}

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count++;
}

struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (entry)
		goto inc_ref;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->netdev = netdev;
	list_add(&entry->list, &priv->non_repr_priv);

inc_ref:
	__nfp_flower_non_repr_priv_get(entry);
	return entry;
}

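/* Drop one reference; when the count hits zero the entry is unlinked and
 * freed. Counterpart of __nfp_flower_non_repr_priv_get().
 */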
void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	if (--non_repr_priv->ref_count)
		return;

	list_del(&non_repr_priv->list);
	kfree(non_repr_priv);
}

void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (!entry)
		return;

	__nfp_flower_non_repr_priv_put(entry);
}

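/* Decode the repr type (phys port/PF/VF) and the port index from a control
 * message port ID. Returns __NFP_REPR_TYPE_MAX for unknown port types.
 */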
static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	return __NFP_REPR_TYPE_MAX;
}

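/* Map a control message port ID back to a netdev. Internal ("other") ports
 * are resolved through the internal port IDR and flag egress redirection;
 * everything else is looked up in the repr tables under RCU.
 */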
static struct net_device *
nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	/* Check if the port is internal. */
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
		if (redir_egress)
			*redir_egress = true;
		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
		return nfp_flower_get_netdev_from_internal_port_id(app, port);
	}

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
	if (repr_type > NFP_REPR_TYPE_MAX)
		return NULL;

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

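/* Send a PORT_REIFY message for every repr of @type, announcing (or
 * retracting) its existence to the firmware. Returns the number of messages
 * sent on success, or a negative errno.
 */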
static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  nfp_app_is_locked(app));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev) {
			struct nfp_repr *repr = netdev_priv(netdev);

			err = nfp_flower_cmsg_portreify(repr, exists);
			if (err)
				return err;
			count++;
		}
	}

	return count;
}

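/* Block until @tot_repl reify replies have been counted, or time out after
 * NFP_FL_REPLY_TIMEOUT. The reply counter is expected to be bumped, and
 * reify_wait_queue woken, by the control message RX handling.
 */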
static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!tot_repl)
		return 0;

	assert_nfp_app_locked(app);
	if (!wait_event_timeout(priv->reify_wait_queue,
				atomic_read(replies) >= tot_repl,
				NFP_FL_REPLY_TIMEOUT)) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}

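/* Create PF or VF representors: allocate the netdevs and their ports, attach
 * per-repr private state, publish them via nfp_app_reprs_set(), then reify
 * them with the firmware and wait for the acks before returning.
 */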
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}

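/* Create one representor per physical port described in the NSP eth table.
 * Also builds the MAC_REPR control message announcing the port set; it is
 * only sent once all reprs are registered and reified.
 */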
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set(). This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, f.e. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
	nn->vnic_no_name = true;

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}

static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}

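/* After the host feature mask has been written, poll the firmware's combined
 * features rtsym for up to 100ms until the HOST_ACK bit reads back set,
 * confirming the firmware has consumed the mask.
 */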
static void nfp_flower_wait_host_bit(struct nfp_app *app)
{
	unsigned long err_at;
	u64 feat;
	int err;

	/* Wait for HOST_ACK flag bit to propagate */
	err_at = jiffies + msecs_to_jiffies(100);
	do {
		feat = nfp_rtsym_read_le(app->pf->rtbl,
					 "_abi_flower_combined_features_global",
					 &err);
		if (time_is_before_eq_jiffies(err_at)) {
			nfp_warn(app->cpp,
				 "HOST_ACK bit not propagated in FW.\n");
			break;
		}
		usleep_range(1000, 2000);
	} while (!err && !(feat & NFP_FL_FEATS_HOST_ACK));

	if (err)
		nfp_warn(app->cpp,
			 "Could not read global features entry from FW\n");
}

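/* Negotiate optional features with the firmware. A missing rtsym (-ENOENT)
 * simply means the firmware predates the feature and is not treated as an
 * error; any other failure is propagated.
 */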
static int nfp_flower_sync_feature_bits(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	/* Tell the firmware of the host supported features. */
	err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_host_mask",
				 app_priv->flower_ext_feats |
				 NFP_FL_FEATS_HOST_ACK);
	if (!err)
		nfp_flower_wait_host_bit(app);
	else if (err != -ENOENT)
		return err;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_en_feats |= NFP_FL_ENABLE_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		return err;
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
		/* Tell the firmware that the driver supports flow merging. */
		err = nfp_rtsym_write_le(app->pf->rtbl,
					 "_abi_flower_merge_hint_enable", 1);
		if (!err) {
			app_priv->flower_en_feats |= NFP_FL_ENABLE_FLOW_MERGE;
			nfp_flower_internal_port_init(app_priv);
		} else if (err == -ENOENT) {
			nfp_warn(app->cpp,
				 "Flow merge not supported by FW.\n");
		} else {
			return err;
		}
	} else {
		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
	}

	return 0;
}

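/* App init: validate the firmware environment (eth table, BARs, version and
 * host context rtsyms), allocate the app private state, initialize flow
 * metadata, then negotiate feature bits with the firmware.
 */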
static int nfp_flower_init(struct nfp_app *app)
{
	u64 version, features, ctx_count, num_mems;
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
				     &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context memory: %d\n",
			 err);
		err = 0;
		num_mems = 1;
	}

	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
		nfp_warn(app->cpp,
			 "FlowerNIC: invalid host context memory: %llu\n",
			 num_mems);
		return -EINVAL;
	}

	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->total_mem_units = num_mems;
	app_priv->active_mem_unit = 0;
	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features & NFP_FL_FEATS_HOST;

	err = nfp_flower_sync_feature_bits(app);
	if (err)
		goto err_cleanup;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_init(app);

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
	INIT_LIST_HEAD(&app_priv->non_repr_priv);
	app_priv->pre_tun_rule_cnt = 0;

	return 0;

err_cleanup:
	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_cleanup(app);

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE)
		nfp_flower_internal_port_cleanup(app_priv);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool ret;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	ret = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return ret;
}

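/* MTU changes on physical port reprs must be mirrored to the firmware. Send
 * an mtu-only PORTMOD message and block until the firmware acks it; the ack
 * is expected to arrive as a control message that sets mtu_conf.ack and
 * wakes mtu_conf.wait_q.
 */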
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
				nfp_flower_check_ack(app_priv),
				NFP_FL_REPLY_TIMEOUT)) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;
	}

	err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
	if (err)
		return err;

	err = nfp_tunnel_config_start(app);
	if (err)
		goto err_tunnel_config;

	return 0;

err_tunnel_config:
	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
				 nfp_flower_setup_indr_tc_release);
	return err;
}

static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);

	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
				 nfp_flower_setup_indr_tc_release);
}

static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int ret;

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
	}

	ret = nfp_flower_internal_port_event_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.dev_get	= nfp_flower_dev_get,

	.setup_tc	= nfp_flower_setup_tc,
};
