// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include "main.h"

/* LAG group config flags. */
#define NFP_FL_LAG_LAST			BIT(1)
#define NFP_FL_LAG_FIRST		BIT(2)
#define NFP_FL_LAG_DATA			BIT(3)
#define NFP_FL_LAG_XON			BIT(4)
#define NFP_FL_LAG_SYNC			BIT(5)
#define NFP_FL_LAG_SWITCH		BIT(6)
#define NFP_FL_LAG_RESET		BIT(7)

/* LAG port state flags. */
#define NFP_PORT_LAG_LINK_UP		BIT(0)
#define NFP_PORT_LAG_TX_ENABLED		BIT(1)
#define NFP_PORT_LAG_CHANGED		BIT(2)

enum nfp_fl_lag_batch {
	NFP_FL_LAG_BATCH_FIRST,
	NFP_FL_LAG_BATCH_MEMBER,
	NFP_FL_LAG_BATCH_FINISHED
};

/**
 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
 * @ctrl_flags: Configuration flags
 * @reserved: Reserved for future use
 * @ttl: Time to live of packet - host always sets to 0xff
 * @pkt_number: Config message packet number - increment for each message
 * @batch_ver: Batch version of messages - increment for each batch of messages
 * @group_id: Group ID applicable
 * @group_inst: Group instance number - increment when group is reused
 * @members: Array of 32-bit words listing all active group members
 */
struct nfp_flower_cmsg_lag_config {
	u8 ctrl_flags;
	u8 reserved[2];
	u8 ttl;
	__be32 pkt_number;
	__be32 batch_ver;
	__be32 group_id;
	__be32 group_inst;
	__be32 members[];
};

/**
 * struct nfp_fl_lag_group - list entry for each LAG group
 * @group_id: Assigned group ID for host/kernel sync
 * @group_inst: Group instance in case of ID reuse
 * @list: List entry
 * @master_ndev: Group master Netdev
 * @dirty: Marked if the group needs to be synced to HW
 * @offloaded: Marked if the group is currently offloaded to NIC
 * @to_remove: Marked if the group should be removed from NIC
 * @to_destroy: Marked if the group should be removed from driver
 * @slave_cnt: Number of slaves in group
 */
struct nfp_fl_lag_group {
	unsigned int group_id;
	u8 group_inst;
	struct list_head list;
	struct net_device *master_ndev;
	bool dirty;
	bool offloaded;
	bool to_remove;
	bool to_destroy;
	unsigned int slave_cnt;
};

#define NFP_FL_LAG_PKT_NUMBER_MASK	GENMASK(30, 0)
#define NFP_FL_LAG_VERSION_MASK		GENMASK(22, 0)
#define NFP_FL_LAG_HOST_TTL		0xff

/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID		0
#define NFP_FL_LAG_GROUP_MIN		1 /* ID 0 reserved */
#define NFP_FL_LAG_GROUP_MAX		31 /* IDs 1 to 31 are valid */

/* wait for more config */
#define NFP_FL_LAG_DELAY		(msecs_to_jiffies(2))

#define NFP_FL_LAG_RETRANS_LIMIT	100 /* max retrans cmsgs to store */

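/* Return the next config message packet number, wrapping within the
 * 31-bit range expected by the firmware.
 */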
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
	lag->pkt_num++;
	lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;

	return lag->pkt_num;
}

static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
	/* LSB is not considered by firmware so add 2 for each increment. */
	lag->batch_ver += 2;
	lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;

	/* Zero is reserved by firmware. */
	if (!lag->batch_ver)
		lag->batch_ver += 2;
}

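/* Allocate a group ID and a tracking structure for a new LAG group and add
 * it to the driver's group list. Called with lag->lock held. Returns the
 * new group or an ERR_PTR on failure.
 */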
static struct nfp_fl_lag_group *
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	int id;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	id = ida_alloc_range(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
			     NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
	if (id < 0) {
		nfp_flower_cmsg_warn(priv->app,
				     "No more bonding groups available\n");
		return ERR_PTR(id);
	}

	group = kmalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		ida_free(&lag->ida_handle, id);
		return ERR_PTR(-ENOMEM);
	}

	group->group_id = id;
	group->master_ndev = master;
	group->dirty = true;
	group->offloaded = false;
	group->to_remove = false;
	group->to_destroy = false;
	group->slave_cnt = 0;
	group->group_inst = ++lag->global_inst;
	list_add_tail(&group->list, &lag->group_list);

	return group;
}

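/* Look up the tracking structure for the group whose bond master is
 * @master. Returns NULL if the master is not currently tracked.
 */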
static struct nfp_fl_lag_group *
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
					  struct net_device *master)
{
	struct nfp_fl_lag_group *entry;

	if (!master)
		return NULL;

	list_for_each_entry(entry, &lag->group_list, list)
		if (entry->master_ndev == master)
			return entry;

	return NULL;
}

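/* Copy the group ID, batch version and group instance of the group tracked
 * for @netdev into the caller-supplied buffers. Any output pointer may be
 * NULL if that field is not needed. Returns -ENOENT if no group exists.
 */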
static int nfp_fl_lag_get_group_info(struct nfp_app *app,
				     struct net_device *netdev,
				     __be16 *group_id,
				     u8 *batch_ver,
				     u8 *group_inst)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	__be32 temp_vers;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  netdev);
	if (!group) {
		mutex_unlock(&priv->nfp_lag.lock);
		return -ENOENT;
	}

	if (group_id)
		*group_id = cpu_to_be16(group->group_id);

	if (batch_ver) {
		temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
					NFP_FL_PRE_LAG_VER_OFF);
		memcpy(batch_ver, &temp_vers, 3);
	}

	if (group_inst)
		*group_inst = group->group_inst;

	mutex_unlock(&priv->nfp_lag.lock);

	return 0;
}

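/* Fill a pre-LAG action with the group ID, batch version and instance of
 * the group tracked for bond @master.
 */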
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
				       struct net_device *master,
				       struct nfp_fl_pre_lag *pre_act,
				       struct netlink_ext_ack *extack)
{
	if (nfp_fl_lag_get_group_info(app, master, &pre_act->group_id,
				      pre_act->lag_version,
				      &pre_act->instance)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
		return -ENOENT;
	}

	return 0;
}

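/* Populate the LAG version and instance fields of a tunnel neighbour entry
 * for @netdev, if the netdev is a tracked bond master.
 */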
void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app,
					 struct net_device *netdev,
					 struct nfp_tun_neigh_lag *lag)
{
	nfp_fl_lag_get_group_info(app, netdev, NULL,
				  lag->lag_version, &lag->lag_instance);
}

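/* Return the group ID tracked for bond @master, or -ENOENT if the bond is
 * not currently tracked.
 */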
int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	int group_id = -ENOENT;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  master);
	if (group)
		group_id = group->group_id;
	mutex_unlock(&priv->nfp_lag.lock);

	return group_id;
}

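/* Build and send a LAG config cmsg for @group listing @member_cnt active
 * members. Also handles batch start/end flagging and reset signalling via
 * *@batch and lag->rst_cfg.
 */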
static int
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
			struct net_device **active_members,
			unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv;
	unsigned long int flags;
	unsigned int size, i;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
	skb = nfp_flower_cmsg_alloc(priv->app, size,
				    NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = 0;

	/* Increment batch version for each new batch of config messages. */
	if (*batch == NFP_FL_LAG_BATCH_FIRST) {
		flags |= NFP_FL_LAG_FIRST;
		nfp_fl_increment_version(lag);
		*batch = NFP_FL_LAG_BATCH_MEMBER;
	}

	/* If it is a reset msg then it is also the end of the batch. */
	if (lag->rst_cfg) {
		flags |= NFP_FL_LAG_RESET;
		*batch = NFP_FL_LAG_BATCH_FINISHED;
	}

	/* To signal the end of a batch, both the switch and last flags are set
	 * and the reserved SYNC group ID is used.
	 */
	if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
		flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
		lag->rst_cfg = false;
		cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
		cmsg_payload->group_inst = 0;
	} else {
		cmsg_payload->group_id = cpu_to_be32(group->group_id);
		cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
	}

	cmsg_payload->reserved[0] = 0;
	cmsg_payload->reserved[1] = 0;
	cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
	cmsg_payload->ctrl_flags = flags;
	cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
	cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));

	for (i = 0; i < member_cnt; i++)
		cmsg_payload->members[i] =
			cpu_to_be32(nfp_repr_get_port_id(active_members[i]));

	nfp_ctrl_tx(priv->app->ctrl, skb);
	return 0;
}

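/* Delayed work handler that walks the group list and syncs pending group
 * removals and membership changes to the firmware, rescheduling itself on
 * failure.
 */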
static void nfp_fl_lag_do_work(struct work_struct *work)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
	struct nfp_fl_lag_group *entry, *storage;
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *priv;
	struct nfp_fl_lag *lag;
	int err;

	delayed_work = to_delayed_work(work);
	lag = container_of(delayed_work, struct nfp_fl_lag, work);
	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		struct net_device *iter_netdev, **acti_netdevs;
		struct nfp_flower_repr_priv *repr_priv;
		int active_count = 0, slaves = 0;
		struct nfp_repr *repr;
		unsigned long *flags;

		if (entry->to_remove) {
			/* Active count of 0 deletes group on hw. */
			err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
						      &batch);
			if (!err) {
				entry->to_remove = false;
				entry->offloaded = false;
			} else {
				nfp_flower_cmsg_warn(priv->app,
						     "group delete failed\n");
				schedule_delayed_work(&lag->work,
						      NFP_FL_LAG_DELAY);
				continue;
			}

			if (entry->to_destroy) {
				ida_free(&lag->ida_handle, entry->group_id);
				list_del(&entry->list);
				kfree(entry);
			}
			continue;
		}

		acti_netdevs = kmalloc_array(entry->slave_cnt,
					     sizeof(*acti_netdevs), GFP_KERNEL);
		if (!acti_netdevs) {
			schedule_delayed_work(&lag->work,
					      NFP_FL_LAG_DELAY);
			continue;
		}

		/* Include sanity check in the loop. It may be that a bond has
		 * changed between processing the last notification and the
		 * work queue triggering. If the number of slaves has changed
		 * or it now contains netdevs that cannot be offloaded, ignore
		 * the group until pending notifications are processed.
		 */
		rcu_read_lock();
		for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
			if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
				slaves = 0;
				break;
			}

			repr = netdev_priv(iter_netdev);

			if (repr->app != priv->app) {
				slaves = 0;
				break;
			}

			slaves++;
			if (slaves > entry->slave_cnt)
				break;

			/* Check the ports for state changes. */
			repr_priv = repr->app_priv;
			flags = &repr_priv->lag_port_flags;

			if (*flags & NFP_PORT_LAG_CHANGED) {
				*flags &= ~NFP_PORT_LAG_CHANGED;
				entry->dirty = true;
			}

			if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
			    (*flags & NFP_PORT_LAG_LINK_UP))
				acti_netdevs[active_count++] = iter_netdev;
		}
		rcu_read_unlock();

		if (slaves != entry->slave_cnt || !entry->dirty) {
			kfree(acti_netdevs);
			continue;
		}

		err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
					      active_count, &batch);
		if (!err) {
			entry->offloaded = true;
			entry->dirty = false;
		} else {
			nfp_flower_cmsg_warn(priv->app,
					     "group offload failed\n");
			schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
		}

		kfree(acti_netdevs);
	}

	/* End the config batch if at least one packet has been batched. */
	if (batch == NFP_FL_LAG_BATCH_MEMBER) {
		batch = NFP_FL_LAG_BATCH_FINISHED;
		err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "group batch end cmsg failed\n");
	}

	mutex_unlock(&lag->lock);
}

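/* Queue a LAG cmsg received from the firmware that cannot be processed yet
 * so it can be retransmitted later. Returns -EINVAL for an out-of-range
 * group ID and -ENOSPC once the retransmit queue is full.
 */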
static int
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	if (be32_to_cpu(cmsg_payload->group_id) > NFP_FL_LAG_GROUP_MAX)
		return -EINVAL;

	/* Drop cmsg retrans if storage limit is exceeded to prevent
	 * overloading. If the fw notices that expected messages have not been
	 * received in a given time block, it will request a full resync.
	 */
	if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
		return -ENOSPC;

	__skb_queue_tail(&lag->retrans_skbs, skb);

	return 0;
}

static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
	struct nfp_flower_priv *priv;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	while ((skb = __skb_dequeue(&lag->retrans_skbs)))
		nfp_ctrl_tx(priv->app->ctrl, skb);
}

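/* Handle a LAG cmsg from the firmware. Stores DATA messages for later
 * retransmission, flushes the stored queue on XON and triggers a full
 * resync of all groups on SYNC. Returns true if the skb was stored.
 */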
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group_entry;
	unsigned long int flags;
	bool store_skb = false;
	int err;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = cmsg_payload->ctrl_flags;

	/* Note the intentional fall through below. If DATA and XON are both
	 * set, the message will be stored and sent again along with the rest
	 * of the unprocessed messages list.
	 */

	/* Store */
	if (flags & NFP_FL_LAG_DATA)
		if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
			store_skb = true;

	/* Send stored */
	if (flags & NFP_FL_LAG_XON)
		nfp_fl_send_unprocessed(&priv->nfp_lag);

	/* Resend all */
	if (flags & NFP_FL_LAG_SYNC) {
		/* To resend all config:
		 * 1) Clear all unprocessed messages
		 * 2) Mark all groups dirty
		 * 3) Reset NFP group config
		 * 4) Schedule a LAG config update
		 */

		__skb_queue_purge(&priv->nfp_lag.retrans_skbs);

		mutex_lock(&priv->nfp_lag.lock);
		list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
				    list)
			group_entry->dirty = true;

		err = nfp_flower_lag_reset(&priv->nfp_lag);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "mem err in group reset msg\n");
		mutex_unlock(&priv->nfp_lag.lock);

		schedule_delayed_work(&priv->nfp_lag.work, 0);
	}

	return store_skb;
}

static void
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
				 struct nfp_fl_lag_group *group)
{
	group->to_remove = true;

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
				 struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	if (!netif_is_bond_master(master))
		return;

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
	if (!group) {
		mutex_unlock(&lag->lock);
		nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
			 netdev_name(master));
		return;
	}

	group->to_remove = true;
	group->to_destroy = true;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

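/* Handle a NETDEV_CHANGEUPPER event on a bond. Creates or updates the
 * tracking group if all slaves are reprs of this app and the bond's tx
 * type and hash can be offloaded, otherwise schedules removal of any
 * existing offload.
 */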
static int
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
			     struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *iter_netdev;
	struct netdev_lag_upper_info *lag_upper_info;
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	unsigned int slave_count = 0;
	bool can_offload = true;
	struct nfp_repr *repr;

	if (!netif_is_lag_master(upper))
		return 0;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, iter_netdev) {
		if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
			can_offload = false;
			break;
		}
		repr = netdev_priv(iter_netdev);

		/* Ensure all ports are created by the same app/on same card. */
		if (repr->app != priv->app) {
			can_offload = false;
			break;
		}

		slave_count++;
	}
	rcu_read_unlock();

	lag_upper_info = info->upper_info;

	/* Firmware supports active/backup and L3/L4 hash bonds. */
	if (lag_upper_info &&
	    lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
	     (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
		can_offload = false;
		nfp_flower_cmsg_warn(priv->app,
				     "Unable to offload tx_type %u hash %u\n",
				     lag_upper_info->tx_type,
				     lag_upper_info->hash_type);
	}

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);

	if (slave_count == 0 || !can_offload) {
		/* Cannot offload the group - remove if previously offloaded. */
		if (group && group->offloaded)
			nfp_fl_lag_schedule_group_remove(lag, group);

		mutex_unlock(&lag->lock);
		return 0;
	}

	if (!group) {
		group = nfp_fl_lag_group_create(lag, upper);
		if (IS_ERR(group)) {
			mutex_unlock(&lag->lock);
			return PTR_ERR(group);
		}
	}

	group->dirty = true;
	group->slave_cnt = slave_count;

	/* Group may have been on queue for removal but is now offloadable. */
	group->to_remove = false;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

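/* Handle a NETDEV_CHANGELOWERSTATE event on a bond port. Records link and
 * tx-enable state changes on the repr and schedules a config update.
 */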
static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
			  struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_flower_priv *priv;
	struct nfp_repr *repr;
	unsigned long *flags;

	if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
		return;

	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	repr = netdev_priv(netdev);

	/* Verify that the repr is associated with this app. */
	if (repr->app != priv->app)
		return;

	repr_priv = repr->app_priv;
	flags = &repr_priv->lag_port_flags;

	mutex_lock(&lag->lock);
	if (lag_lower_info->link_up)
		*flags |= NFP_PORT_LAG_LINK_UP;
	else
		*flags &= ~NFP_PORT_LAG_LINK_UP;

	if (lag_lower_info->tx_enabled)
		*flags |= NFP_PORT_LAG_TX_ENABLED;
	else
		*flags &= ~NFP_PORT_LAG_TX_ENABLED;

	*flags |= NFP_PORT_LAG_CHANGED;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

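/* Dispatch netdev notifier events that are relevant to LAG offload. */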
int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
				struct net_device *netdev,
				unsigned long event, void *ptr)
{
	struct nfp_fl_lag *lag = &priv->nfp_lag;
	int err;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = nfp_fl_lag_changeupper_event(lag, ptr);
		if (err)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case NETDEV_CHANGELOWERSTATE:
		nfp_fl_lag_changels_event(lag, netdev, ptr);
		return NOTIFY_OK;
	case NETDEV_UNREGISTER:
		nfp_fl_lag_schedule_group_delete(lag, netdev);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

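/* Send a reset message so the firmware clears all existing LAG config. */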
int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;

	lag->rst_cfg = true;
	return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}

void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
	INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
	INIT_LIST_HEAD(&lag->group_list);
	mutex_init(&lag->lock);
	ida_init(&lag->ida_handle);

	__skb_queue_head_init(&lag->retrans_skbs);

	/* 0 is a reserved batch version so increment to first valid value. */
	nfp_fl_increment_version(lag);
}

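/* Tear down LAG state: stop the config work, drop any queued retransmit
 * cmsgs and free all tracked groups.
 */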
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
	struct nfp_fl_lag_group *entry, *storage;

	cancel_delayed_work_sync(&lag->work);

	__skb_queue_purge(&lag->retrans_skbs);

	/* Remove all groups. */
	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&lag->lock);
	mutex_destroy(&lag->lock);
	ida_destroy(&lag->ida_handle);
}
