// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

#define QEDE_FILTER_PRINT_MAX_LEN (64)
struct qede_arfs_tuple {
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
	__be16 eth_proto;
	u8 ip_proto;

	/* Describe filtering mode needed for this kind of filter */
	enum qed_filter_config_mode mode;

	/* Used to compare new/old filters. Return true if IPs match */
	bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);

	/* Given an address into ethhdr build a header from tuple info */
	void (*build_hdr)(struct qede_arfs_tuple *t, void *header);

	/* Stringify the tuple for a print into the provided buffer */
	void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID 0
	unsigned long state;

	/* pointer to aRFS packet buffer */
	void *data;

	/* dma map address of aRFS packet buffer */
	dma_addr_t mapping;

	/* length of aRFS packet buffer */
	int buf_len;

	/* tuples to hold from aRFS packet buffer */
	struct qede_arfs_tuple tuple;

	u32 flow_id;
	u64 sw_id;
	u16 rxq_id;
	u16 next_rxq_id;
	u8 vfid;
	bool filter_op;
	bool used;
	u8 fw_rc;
	bool b_is_drop;
	struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
#define QEDE_ARFS_POLL_COUNT 100
#define QEDE_RFS_FLW_BITSHIFT (4)
#define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
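	/* Filters are hashed into one of (1 << QEDE_RFS_FLW_BITSHIFT)
	 * buckets using the low bits of the skb hash; see
	 * qede_rx_flow_steer().
	 */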
	struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

	/* lock for filter list access */
	spinlock_t arfs_list_lock;
	unsigned long *arfs_fltr_bmap;
	int filter_count;

	/* Currently configured filtering mode */
	enum qed_filter_config_mode mode;
};

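/* Post an add/delete request for a single filter to the device. The node
 * is marked busy (n->used) until the qed core reports completion through
 * qede_arfs_filter_op(). A non-zero n->vfid means "VF (n->vfid - 1)";
 * zero means the filter belongs to the PF.
 */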
static void qede_configure_arfs_fltr(struct qede_dev *edev,
				     struct qede_arfs_fltr_node *n,
				     u16 rxq_id, bool add_fltr)
{
	const struct qed_eth_ops *op = edev->ops;
	struct qed_ntuple_filter_params params;

	if (n->used)
		return;

	memset(&params, 0, sizeof(params));

	params.addr = n->mapping;
	params.length = n->buf_len;
	params.qid = rxq_id;
	params.b_is_add = add_fltr;
	params.b_is_drop = n->b_is_drop;

	if (n->vfid) {
		params.b_is_vf = true;
		params.vf_id = n->vfid - 1;
	}

	if (n->tuple.stringify) {
		char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];

		n->tuple.stringify(&n->tuple, tuple_buffer);
		DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
			   "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
			   add_fltr ? "Adding" : "Deleting",
			   n->sw_id, tuple_buffer, n->vfid, rxq_id);
	}

	n->used = true;
	n->filter_op = add_fltr;
	op->ntuple_filter_config(edev->cdev, n, &params);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
	kfree(fltr->data);

	if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
		clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);

	kfree(fltr);
}

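/* Map the filter's packet buffer for DMA, link the node into its hash
 * bucket and, when this is the very first filter, configure the searcher
 * for the filter's mode.
 */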
static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr,
				      u16 bucket_idx)
{
	fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
				       fltr->buf_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
		DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
		qede_free_arfs_filter(edev, fltr);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&fltr->node);
	hlist_add_head(&fltr->node,
		       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));

	edev->arfs->filter_count++;
	if (edev->arfs->filter_count == 1 &&
	    edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
		edev->ops->configure_arfs_searcher(edev->cdev,
						   fltr->tuple.mode);
		edev->arfs->mode = fltr->tuple.mode;
	}

	return 0;
}

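/* Reverse of qede_enqueue_fltr_and_config_searcher(): unlink and free the
 * filter, and disable the searcher once the last filter is gone.
 */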
static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr)
{
	hlist_del(&fltr->node);
	dma_unmap_single(&edev->pdev->dev, fltr->mapping,
			 fltr->buf_len, DMA_TO_DEVICE);

	qede_free_arfs_filter(edev, fltr);

	edev->arfs->filter_count--;
	if (!edev->arfs->filter_count &&
	    edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
		enum qed_filter_config_mode mode;

		mode = QED_FILTER_CONFIG_MODE_DISABLE;
		edev->ops->configure_arfs_searcher(edev->cdev, mode);
		edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
	}
}

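/* Completion callback invoked by the qed core for a previously posted
 * filter request. On a successful add whose target queue has since moved,
 * the filter is first deleted from its old rxq; the delete completion
 * then re-adds it on next_rxq_id.
 */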
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
	struct qede_arfs_fltr_node *fltr = filter;
	struct qede_dev *edev = dev;

	fltr->fw_rc = fw_rc;

	if (fw_rc) {
		DP_NOTICE(edev,
			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
			  fw_rc, fltr->flow_id, fltr->sw_id,
			  ntohs(fltr->tuple.src_port),
			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);

		spin_lock_bh(&edev->arfs->arfs_list_lock);

		fltr->used = false;
		clear_bit(QEDE_FLTR_VALID, &fltr->state);

		spin_unlock_bh(&edev->arfs->arfs_list_lock);
		return;
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	fltr->used = false;

	if (fltr->filter_op) {
		set_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id)
			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
						 false);
	} else {
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id) {
			fltr->rxq_id = fltr->next_rxq_id;
			qede_configure_arfs_fltr(edev, fltr,
						 fltr->rxq_id, true);
		}
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
	int i;

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
		struct hlist_node *temp;
		struct hlist_head *head;
		struct qede_arfs_fltr_node *fltr;

		head = &edev->arfs->arfs_hl_head[i];

		hlist_for_each_entry_safe(fltr, temp, head, node) {
			bool del = false;

			if (edev->state != QEDE_STATE_OPEN)
				del = true;

			spin_lock_bh(&edev->arfs->arfs_list_lock);

			if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
			     !fltr->used) || free_fltr) {
				qede_dequeue_fltr_and_config_searcher(edev,
								      fltr);
			} else {
				bool flow_exp = false;
#ifdef CONFIG_RFS_ACCEL
				flow_exp = rps_may_expire_flow(edev->ndev,
							       fltr->rxq_id,
							       fltr->flow_id,
							       fltr->sw_id);
#endif
				if ((flow_exp || del) && !free_fltr)
					qede_configure_arfs_fltr(edev, fltr,
								 fltr->rxq_id,
								 false);
			}

			spin_unlock_bh(&edev->arfs->arfs_list_lock);
		}
	}

#ifdef CONFIG_RFS_ACCEL
	spin_lock_bh(&edev->arfs->arfs_list_lock);

	if (edev->arfs->filter_count) {
		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task,
				      QEDE_SP_TASK_POLL_DELAY);
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
#endif
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (count) {
		qede_process_arfs_filters(edev, false);

		if (!edev->arfs->filter_count)
			break;

		msleep(100);
		count--;
	}

	if (!count) {
		DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

		/* Something is terribly wrong, free forcefully */
		qede_process_arfs_filters(edev, true);
	}
}

int qede_alloc_arfs(struct qede_dev *edev)
{
	int i;

	if (!edev->dev_info.common.b_arfs_capable)
		return -EINVAL;

	edev->arfs = vzalloc(sizeof(*edev->arfs));
	if (!edev->arfs)
		return -ENOMEM;

	spin_lock_init(&edev->arfs->arfs_list_lock);

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));

	edev->arfs->arfs_fltr_bmap =
		vzalloc(array_size(sizeof(long),
				   BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
	if (!edev->arfs->arfs_fltr_bmap) {
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

#ifdef CONFIG_RFS_ACCEL
	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
	if (!edev->ndev->rx_cpu_rmap) {
		vfree(edev->arfs->arfs_fltr_bmap);
		edev->arfs->arfs_fltr_bmap = NULL;
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
	if (!edev->arfs)
		return;

#ifdef CONFIG_RFS_ACCEL
	if (edev->ndev->rx_cpu_rmap)
		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

	edev->ndev->rx_cpu_rmap = NULL;
#endif
	vfree(edev->arfs->arfs_fltr_bmap);
	edev->arfs->arfs_fltr_bmap = NULL;
	vfree(edev->arfs);
	edev->arfs = NULL;
}

#ifdef CONFIG_RFS_ACCEL
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
				 const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
			return true;
		else
			return false;
	} else {
		struct in6_addr *src = &tpos->tuple.src_ipv6;
		u8 size = sizeof(struct in6_addr);

		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
			return true;
		else
			return false;
	}
}

static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
			  __be16 src_port, __be16 dst_port, u8 ip_proto)
{
	struct qede_arfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->tuple.ip_proto == ip_proto &&
		    tpos->tuple.eth_proto == skb->protocol &&
		    qede_compare_ip_addr(tpos, skb) &&
		    tpos->tuple.src_port == src_port &&
		    tpos->tuple.dst_port == dst_port)
			return tpos;

	return NULL;
}

static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
	struct qede_arfs_fltr_node *n;
	int bit_id;

	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
				     QEDE_RFS_MAX_FLTR);

	if (bit_id >= QEDE_RFS_MAX_FLTR)
		return NULL;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n)
		return NULL;

	n->data = kzalloc(min_hlen, GFP_ATOMIC);
	if (!n->data) {
		kfree(n);
		return NULL;
	}

	n->sw_id = (u16)bit_id;
	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
	return n;
}

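/* .ndo_rx_flow_steer() callback, called from the RPS code in the receive
 * path. Returns the filter's sw_id on success so the stack can later pass
 * it back through rps_may_expire_flow().
 */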
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc, tp_offset;
	struct ethhdr *eth;
	__be16 *ports;
	u16 tbl_idx;
	u8 ip_proto;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP) &&
	    skb->protocol != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->protocol == htons(ETH_P_IP)) {
		ip_proto = ip_hdr(skb)->protocol;
		tp_offset = sizeof(struct iphdr);
	} else {
		ip_proto = ipv6_hdr(skb)->nexthdr;
		tp_offset = sizeof(struct ipv6hdr);
	}

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	ports = (__be16 *)(skb->data + tp_offset);
	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
				      skb, ports[0], ports[1], ip_proto);
	if (n) {
		/* Filter match */
		n->next_rxq_id = rxq_index;

		if (test_bit(QEDE_FLTR_VALID, &n->state)) {
			if (n->rxq_id != rxq_index)
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 false);
		} else {
			if (!n->used) {
				n->rxq_id = rxq_index;
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 true);
			}
		}

		rc = n->sw_id;
		goto ret_unlock;
	}

	min_hlen = ETH_HLEN + skb_headlen(skb);

	n = qede_alloc_filter(edev, min_hlen);
	if (!n) {
		rc = -ENOMEM;
		goto ret_unlock;
	}

	n->buf_len = min_hlen;
	n->rxq_id = rxq_index;
	n->next_rxq_id = rxq_index;
	n->tuple.src_port = ports[0];
	n->tuple.dst_port = ports[1];
	n->flow_id = flow_id;

	if (skb->protocol == htons(ETH_P_IP)) {
		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
	} else {
		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
		       sizeof(struct in6_addr));
		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
		       sizeof(struct in6_addr));
	}

	eth = (struct ethhdr *)n->data;
	eth->h_proto = skb->protocol;
	n->tuple.eth_proto = skb->protocol;
	n->tuple.ip_proto = ip_proto;
	n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
	if (rc)
		goto ret_unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

	spin_unlock_bh(&edev->arfs->arfs_list_lock);

	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	return n->sw_id;

ret_unlock:
	spin_unlock_bh(&edev->arfs->arfs_list_lock);
	return rc;
}
#endif

void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
	struct qede_dev *edev = dev;

	if (edev->vxlan_dst_port != vxlan_port)
		edev->vxlan_dst_port = 0;

	if (edev->geneve_dst_port != geneve_port)
		edev->geneve_dst_port = 0;
}

void qede_force_mac(void *dev, u8 *mac, bool forced)
{
	struct qede_dev *edev = dev;

	__qede_lock(edev);

	if (!is_valid_ether_addr(mac)) {
		__qede_unlock(edev);
		return;
	}

	eth_hw_addr_set(edev->ndev, mac);
	__qede_unlock(edev);
}

void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update)
{
	bool need_reset = false;
	int i;

	if (QEDE_RSS_COUNT(edev) <= 1) {
		memset(rss, 0, sizeof(*rss));
		*update = 0;
		return;
	}

	/* Need to validate current RSS config uses valid entries */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
			need_reset = true;
			break;
		}
	}

	if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			u16 indir_val, val;

			val = QEDE_RSS_COUNT(edev);
			indir_val = ethtool_rxfh_indir_default(i, val);
			edev->rss_ind_table[i] = indir_val;
		}
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	/* Now that we have the queue-indirection, prepare the handles */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
	}

	if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
		netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}
	memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

	if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
		edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
		    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
	}
	rss->rss_caps = edev->rss_caps;

	*update = 1;
}

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 const unsigned char mac[ETH_ALEN])
{
	struct qed_filter_ucast_params ucast;

	memset(&ucast, 0, sizeof(ucast));
	ucast.type = opcode;
	ucast.mac_valid = 1;
	ether_addr_copy(ucast.mac, mac);

	return edev->ops->filter_config_ucast(edev->cdev, &ucast);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
				  enum qed_filter_xcast_params_type opcode,
				  u16 vid)
{
	struct qed_filter_ucast_params ucast;

	memset(&ucast, 0, sizeof(ucast));
	ucast.type = opcode;
	ucast.vlan_valid = 1;
	ucast.vlan = vid;

	return edev->ops->filter_config_ucast(edev->cdev, &ucast);
}

static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
	struct qed_update_vport_params *params;
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (edev->accept_any_vlan == action)
		return 0;

	params = vzalloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->vport_id = 0;
	params->accept_any_vlan = action;
	params->update_accept_any_vlan_flg = 1;

	rc = edev->ops->vport_update(edev->cdev, params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}

	vfree(params);
	return 0;
}

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan, *tmp;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&vlan->list);
	vlan->vid = vid;
	vlan->configured = false;

	/* Verify vlan isn't already configured */
	list_for_each_entry(tmp, &edev->vlan_list, list) {
		if (tmp->vid == vlan->vid) {
			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
				   "vlan already configured\n");
			kfree(vlan);
			return -EEXIST;
		}
	}

	/* If interface is down, cache this VLAN ID and return */
	__qede_lock(edev);
	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, VLAN %d will be configured when interface is up\n",
			   vid);
		if (vid != 0)
			edev->non_configured_vlans++;
		list_add(&vlan->list, &edev->vlan_list);
		goto out;
	}

	/* Check for the filter limit.
	 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vlan->vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
					    QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %d\n",
			       vlan->vid);
			kfree(vlan);
			goto out;
		}
		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0)
			edev->configured_vlans++;
	} else {
		/* Out of quota; Activate accept-any-VLAN mode */
		if (!edev->non_configured_vlans) {
			rc = qede_config_accept_any_vlan(edev, true);
			if (rc) {
				kfree(vlan);
				goto out;
			}
		}

		edev->non_configured_vlans++;
	}

	list_add(&vlan->list, &edev->vlan_list);

out:
	__qede_unlock(edev);
	return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
				    struct qede_vlan *vlan)
{
	/* vlan0 filter isn't consuming out of our quota */
	if (vlan->vid != 0) {
		if (vlan->configured)
			edev->configured_vlans--;
		else
			edev->non_configured_vlans--;
	}

	list_del(&vlan->list);
	kfree(vlan);
}

int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have used all our credits, now enable accept_any_vlan */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;
		/* vlan0 filter doesn't consume our VLAN filter's quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */

	if (accept_any_vlan)
		rc = qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		rc = qede_config_accept_any_vlan(edev, false);

	if (rc && !real_rc)
		real_rc = rc;

	return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

	/* Find whether entry exists */
	__qede_lock(edev);
	list_for_each_entry(vlan, &edev->vlan_list, list)
		if (vlan->vid == vid)
			break;

	if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Vlan isn't configured\n");
		goto out;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		/* As interface is already down, we don't have a VPORT
		 * instance to remove vlan filter. So just update vlan list
		 */
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, removing VLAN from list only\n");
		qede_del_vlan_from_list(edev, vlan);
		goto out;
	}

	/* Remove vlan */
	if (vlan->configured) {
		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
					    vid);
		if (rc) {
			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
			goto out;
		}
	}

	qede_del_vlan_from_list(edev, vlan);

	/* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
	rc = qede_configure_vlan_filters(edev);

out:
	__qede_unlock(edev);
	return rc;
}

void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return;

	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (!vlan->configured)
			continue;

		vlan->configured = false;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans++;
			edev->configured_vlans--;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "marked vlan %d as non-configured\n", vlan->vid);
	}

	edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
				     struct qede_reload_args *args)
{
	edev->ndev->features = args->u.features;
}

netdev_features_t qede_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
	    !(features & NETIF_F_GRO))
		features &= ~NETIF_F_GRO_HW;

	return features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool need_reload = false;

	if (changes & NETIF_F_GRO_HW)
		need_reload = true;

	if (need_reload) {
		struct qede_reload_args args;

		args.u.features = features;
		args.func = &qede_set_features_reload;

		/* Make sure that we definitely need to reload.
		 * In case of an eBPF attached program, there will be no FW
		 * aggregations, so no need to actually reload.
		 */
		__qede_lock(edev);
		if (edev->xdp_prog)
			args.func(edev, &args);
		else
			qede_reload(edev, &args, true);
		__qede_unlock(edev);

		return 1;
	}

	return 0;
}

static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	struct udp_tunnel_info ti;
	u16 *save_port;
	int rc;

	memset(&tunn_params, 0, sizeof(tunn_params));

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = ntohs(ti.port);
		save_port = &edev->vxlan_dst_port;
	} else {
		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = ntohs(ti.port);
		save_port = &edev->geneve_dst_port;
	}

	__qede_lock(edev);
	rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
	__qede_unlock(edev);
	if (rc)
		return rc;

	*save_port = ntohs(ti.port);
	return 0;
}

static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
	.sync_table	= qede_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
}, qede_udp_tunnels_vxlan = {
	.sync_table	= qede_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
}, qede_udp_tunnels_geneve = {
	.sync_table	= qede_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

void qede_set_udp_tunnels(struct qede_dev *edev)
{
	if (edev->dev_info.common.vxlan_enable &&
	    edev->dev_info.common.geneve_enable)
		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_both;
	else if (edev->dev_info.common.vxlan_enable)
		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_vxlan;
	else if (edev->dev_info.common.geneve_enable)
		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_geneve;
}

static void qede_xdp_reload_func(struct qede_dev *edev,
				 struct qede_reload_args *args)
{
	struct bpf_prog *old;

	old = xchg(&edev->xdp_prog, args->u.new_prog);
	if (old)
		bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
	struct qede_reload_args args;

	/* If we're called, there was already a bpf reference increment */
	args.func = &qede_xdp_reload_func;
	args.u.new_prog = prog;
	qede_reload(edev, &args, false);

	return 0;
}

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return qede_xdp_set(edev, xdp->prog);
	default:
		return -EINVAL;
	}
}

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_mcast_params mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	mcast.type = opcode;
	mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(mcast.mac[i], mac);

	return edev->ops->filter_config_mcast(edev->cdev, &mcast);
}

int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc = 0;

	/* Make sure the state doesn't transition while changing the MAC.
	 * Also, all flows accessing the dev_addr field are doing that under
	 * this lock.
	 */
	__qede_lock(edev);

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		rc = -EFAULT;
		goto out;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
			  addr->sa_data);
		rc = -EINVAL;
		goto out;
	}

	if (edev->state == QEDE_STATE_OPEN) {
		/* Remove the previous primary mac */
		rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
					   ndev->dev_addr);
		if (rc)
			goto out;
	}

	eth_hw_addr_set(ndev, addr->sa_data);
	DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);

	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "The device is currently down\n");
		/* Ask PF to explicitly update a copy in bulletin board */
		if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
			edev->ops->req_bulletin_update_mac(edev->cdev,
							   ndev->dev_addr);
		goto out;
	}

	edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);

	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				   ndev->dev_addr);
out:
	__qede_unlock(edev);
	return rc;
}

static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count <= 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags;
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->ndev->dev_addr);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if (ndev->flags & IFF_PROMISC)
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	else
		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

	/* Configure all filters regardless, in case promisc is rejected */
	if (uc_count < edev->dev_info.num_mac_filters) {
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}
	} else {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	}

	rc = qede_configure_mcast_filtering(ndev, &accept_flags);
	if (rc)
		goto out;

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	edev->ops->filter_config_rx_mode(edev->cdev, accept_flags);
out:
	kfree(uc_macs);
}

static struct qede_arfs_fltr_node *
qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
	struct qede_arfs_fltr_node *fltr;

	hlist_for_each_entry(fltr, head, node)
		if (location == fltr->sw_id)
			return fltr;

	return NULL;
}

int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_head *head;
	int cnt = 0, rc = 0;

	info->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry(fltr, head, node) {
		if (cnt == info->rule_cnt) {
			rc = -EMSGSIZE;
			goto unlock;
		}

		rule_locs[cnt] = fltr->sw_id;
		cnt++;
	}

	info->rule_cnt = cnt;

unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = 0;

	cmd->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 fsp->location);
	if (!fltr) {
		DP_NOTICE(edev, "Rule not found - location=0x%x\n",
			  fsp->location);
		rc = -EINVAL;
		goto unlock;
	}

	if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V4_FLOW;
		else
			fsp->flow_type = UDP_V4_FLOW;

		fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
		fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
	} else {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V6_FLOW;
		else
			fsp->flow_type = UDP_V6_FLOW;
		fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
		       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
		       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
	}

	fsp->ring_cookie = fltr->rxq_id;

	if (fltr->vfid) {
		fsp->ring_cookie |= ((u64)fltr->vfid) <<
					ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
	}

	if (fltr->b_is_drop)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
unlock:
	__qede_unlock(edev);
	return rc;
}

static int
qede_poll_arfs_filter_config(struct qede_dev *edev,
			     struct qede_arfs_fltr_node *fltr)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (fltr->used && count) {
		msleep(20);
		count--;
	}

	if (count == 0 || fltr->fw_rc) {
		DP_NOTICE(edev, "Timeout in polling filter config\n");
		qede_dequeue_fltr_and_config_searcher(edev, fltr);
		return -EIO;
	}

	return fltr->fw_rc;
}

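/* Smallest frame that can carry the tuple's headers: Ethernet plus
 * IPv4/IPv6 plus TCP/UDP. Used to size the dummy packet handed to the
 * device by the build_hdr() callbacks below.
 */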
static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
{
	int size = ETH_HLEN;

	if (t->eth_proto == htons(ETH_P_IP))
		size += sizeof(struct iphdr);
	else
		size += sizeof(struct ipv6hdr);

	if (t->ip_proto == IPPROTO_TCP)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	return size;
}

static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
				    struct qede_arfs_tuple *b)
{
	if (a->eth_proto != htons(ETH_P_IP) ||
	    b->eth_proto != htons(ETH_P_IP))
		return false;

	return (a->src_ipv4 == b->src_ipv4) &&
	       (a->dst_ipv4 == b->dst_ipv4);
}

static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
				     void *header)
{
	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
	struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
	struct ethhdr *eth = (struct ethhdr *)header;

	eth->h_proto = t->eth_proto;
	ip->saddr = t->src_ipv4;
	ip->daddr = t->dst_ipv4;
	ip->version = 0x4;
	ip->ihl = 0x5;
	ip->protocol = t->ip_proto;
	ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);

	/* ports is weakly typed to suit both TCP and UDP ports */
	ports[0] = t->src_port;
	ports[1] = t->dst_port;
}

static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
					 void *buffer)
{
	const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";

	snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
		 "%s %pI4 (%04x) -> %pI4 (%04x)",
		 prefix, &t->src_ipv4, t->src_port,
		 &t->dst_ipv4, t->dst_port);
}

static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
				    struct qede_arfs_tuple *b)
{
	if (a->eth_proto != htons(ETH_P_IPV6) ||
	    b->eth_proto != htons(ETH_P_IPV6))
		return false;

	if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
		return false;

	if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
		return false;

	return true;
}

static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
				     void *header)
{
	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
	struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
	struct ethhdr *eth = (struct ethhdr *)header;

	eth->h_proto = t->eth_proto;
	memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
	memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
	ip6->version = 0x6;

	if (t->ip_proto == IPPROTO_TCP) {
		ip6->nexthdr = NEXTHDR_TCP;
		ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
	} else {
		ip6->nexthdr = NEXTHDR_UDP;
		ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
	}

	/* ports is weakly typed to suit both TCP and UDP ports */
	ports[0] = t->src_port;
	ports[1] = t->dst_port;
}

/* Validate fields which are set and not accepted by the driver */
static int qede_flow_spec_validate_unused(struct qede_dev *edev,
					  struct ethtool_rx_flow_spec *fs)
{
	if (fs->flow_type & FLOW_MAC_EXT) {
		DP_INFO(edev, "Don't support MAC extensions\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
		DP_INFO(edev, "Don't support vlan-based classification\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] || fs->h_ext.data[1])) {
		DP_INFO(edev, "Don't support user defined data\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

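/* Map the populated tuple fields onto a hardware filtering mode: all of
 * source/destination port and IP -> 5-tuple, destination port alone ->
 * L4 port, a lone source or destination IP -> the corresponding IP mode.
 * Anything else is rejected.
 */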
static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t)
{
	/* We must have Only 4-tuples/l4 port/src ip/dst ip
	 * as an input.
	 */
	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !t->src_ipv4 && !t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !t->dst_ipv4 && t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   t->dst_ipv4 && !t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv4_cmp;
	t->build_hdr = qede_flow_build_ipv4_hdr;
	t->stringify = qede_flow_stringify_ipv4_hdr;

	return 0;
}

static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t,
					struct in6_addr *zaddr)
{
	/* We must have Only 4-tuples/l4 port/src ip/dst ip
	 * as an input.
	 */
	if (t->src_port && t->dst_port &&
	    memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
	    memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv6_cmp;
	t->build_hdr = qede_flow_build_ipv6_hdr;

	return 0;
}

/* Must be called while qede lock is held */
static struct qede_arfs_fltr_node *
qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_node *temp;
	struct hlist_head *head;

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry_safe(fltr, temp, head, node) {
		if (fltr->tuple.ip_proto == t->ip_proto &&
		    fltr->tuple.src_port == t->src_port &&
		    fltr->tuple.dst_port == t->dst_port &&
		    t->ip_comp(&fltr->tuple, t))
			return fltr;
	}

	return NULL;
}

static void qede_flow_set_destination(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *n,
				      struct ethtool_rx_flow_spec *fs)
{
	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		n->b_is_drop = true;
		return;
	}

	n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
	n->next_rxq_id = n->rxq_id;

	if (n->vfid)
		DP_VERBOSE(edev, QED_MSG_SP,
			   "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
}

int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = -EPERM;

	__qede_lock(edev);
	if (!edev->arfs)
		goto unlock;

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 cookie);
	if (!fltr)
		goto unlock;

	qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);

	rc = qede_poll_arfs_filter_config(edev, fltr);
	if (rc == 0)
		qede_dequeue_fltr_and_config_searcher(edev, fltr);

unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_get_arfs_filter_count(struct qede_dev *edev)
{
	int count = 0;

	__qede_lock(edev);

	if (!edev->arfs)
		goto unlock;

	count = edev->arfs->filter_count;

unlock:
	__qede_unlock(edev);
	return count;
}

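/* Validate the flow_action list of a TC rule; only "drop" and "queue"
 * actions are supported, and a queue directed at a VF skips the local
 * RSS-range check.
 */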
static int qede_parse_actions(struct qede_dev *edev,
			      struct flow_action *flow_action,
			      struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		DP_NOTICE(edev, "No actions received\n");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			break;
		case FLOW_ACTION_QUEUE:
			if (act->queue.vf)
				break;

			if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
				DP_INFO(edev, "Queue out-of-bounds\n");
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

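/* Extract TCP/UDP ports from the rule; only exact (all-ones mask) port
 * matches are supported.
 */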
1702 | static int |
1703 | qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule, |
1704 | struct qede_arfs_tuple *t) |
1705 | { |
1706 | if (flow_rule_match_key(rule, key: FLOW_DISSECTOR_KEY_PORTS)) { |
1707 | struct flow_match_ports match; |
1708 | |
1709 | flow_rule_match_ports(rule, out: &match); |
1710 | if ((match.key->src && match.mask->src != htons(U16_MAX)) || |
1711 | (match.key->dst && match.mask->dst != htons(U16_MAX))) { |
1712 | DP_NOTICE(edev, "Do not support ports masks\n" ); |
1713 | return -EINVAL; |
1714 | } |
1715 | |
1716 | t->src_port = match.key->src; |
1717 | t->dst_port = match.key->dst; |
1718 | } |
1719 | |
1720 | return 0; |
1721 | } |
1722 | |
1723 | static int |
1724 | qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule, |
1725 | struct qede_arfs_tuple *t) |
1726 | { |
1727 | struct in6_addr zero_addr, addr; |
1728 | |
1729 | memset(&zero_addr, 0, sizeof(addr)); |
1730 | memset(&addr, 0xff, sizeof(addr)); |
1731 | |
1732 | if (flow_rule_match_key(rule, key: FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { |
1733 | struct flow_match_ipv6_addrs match; |
1734 | |
1735 | flow_rule_match_ipv6_addrs(rule, out: &match); |
1736 | if ((memcmp(p: &match.key->src, q: &zero_addr, size: sizeof(addr)) && |
1737 | memcmp(p: &match.mask->src, q: &addr, size: sizeof(addr))) || |
1738 | (memcmp(p: &match.key->dst, q: &zero_addr, size: sizeof(addr)) && |
1739 | memcmp(p: &match.mask->dst, q: &addr, size: sizeof(addr)))) { |
1740 | DP_NOTICE(edev, |
1741 | "Do not support IPv6 address prefix/mask\n" ); |
1742 | return -EINVAL; |
1743 | } |
1744 | |
1745 | memcpy(&t->src_ipv6, &match.key->src, sizeof(addr)); |
1746 | memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr)); |
1747 | } |
1748 | |
1749 | if (qede_flow_parse_ports(edev, rule, t)) |
1750 | return -EINVAL; |
1751 | |
1752 | return qede_set_v6_tuple_to_profile(edev, t, zaddr: &zero_addr); |
1753 | } |
1754 | |
1755 | static int |
1756 | qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule, |
1757 | struct qede_arfs_tuple *t) |
1758 | { |
1759 | if (flow_rule_match_key(rule, key: FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { |
1760 | struct flow_match_ipv4_addrs match; |
1761 | |
1762 | flow_rule_match_ipv4_addrs(rule, out: &match); |
1763 | if ((match.key->src && match.mask->src != htonl(U32_MAX)) || |
1764 | (match.key->dst && match.mask->dst != htonl(U32_MAX))) { |
1765 | DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n" ); |
1766 | return -EINVAL; |
1767 | } |
1768 | |
1769 | t->src_ipv4 = match.key->src; |
1770 | t->dst_ipv4 = match.key->dst; |
1771 | } |
1772 | |
1773 | if (qede_flow_parse_ports(edev, rule, t)) |
1774 | return -EINVAL; |
1775 | |
1776 | return qede_set_v4_tuple_to_profile(edev, t); |
1777 | } |
1778 | |
1779 | static int |
1780 | qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule, |
1781 | struct qede_arfs_tuple *tuple) |
1782 | { |
1783 | tuple->ip_proto = IPPROTO_TCP; |
1784 | tuple->eth_proto = htons(ETH_P_IPV6); |
1785 | |
	return qede_flow_parse_v6_common(edev, rule, tuple);
1787 | } |
1788 | |
1789 | static int |
1790 | qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule, |
1791 | struct qede_arfs_tuple *tuple) |
1792 | { |
1793 | tuple->ip_proto = IPPROTO_TCP; |
1794 | tuple->eth_proto = htons(ETH_P_IP); |
1795 | |
	return qede_flow_parse_v4_common(edev, rule, tuple);
1797 | } |
1798 | |
1799 | static int |
1800 | qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule, |
1801 | struct qede_arfs_tuple *tuple) |
1802 | { |
1803 | tuple->ip_proto = IPPROTO_UDP; |
1804 | tuple->eth_proto = htons(ETH_P_IPV6); |
1805 | |
	return qede_flow_parse_v6_common(edev, rule, tuple);
1807 | } |
1808 | |
1809 | static int |
1810 | qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule, |
1811 | struct qede_arfs_tuple *tuple) |
1812 | { |
1813 | tuple->ip_proto = IPPROTO_UDP; |
1814 | tuple->eth_proto = htons(ETH_P_IP); |
1815 | |
	return qede_flow_parse_v4_common(edev, rule, tuple);
1817 | } |
1818 | |
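/* Central flower-rule parser: reject dissector keys the device cannot
 * offload, then dispatch on ethertype/ip_proto to fill the aRFS tuple.
 */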
1819 | static int |
1820 | qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, |
1821 | struct flow_rule *rule, struct qede_arfs_tuple *tuple) |
1822 | { |
1823 | struct flow_dissector *dissector = rule->match.dissector; |
1824 | int rc = -EINVAL; |
1825 | u8 ip_proto = 0; |
1826 | |
1827 | memset(tuple, 0, sizeof(*tuple)); |
1828 | |
1829 | if (dissector->used_keys & |
1830 | ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | |
1831 | BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | |
1832 | BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | |
1833 | BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | |
1834 | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { |
1835 | DP_NOTICE(edev, "Unsupported key set:0x%llx\n" , |
1836 | dissector->used_keys); |
1837 | return -EOPNOTSUPP; |
1838 | } |
1839 | |
1840 | if (proto != htons(ETH_P_IP) && |
1841 | proto != htons(ETH_P_IPV6)) { |
1842 | DP_NOTICE(edev, "Unsupported proto=0x%x\n" , proto); |
1843 | return -EPROTONOSUPPORT; |
1844 | } |
1845 | |
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
1850 | ip_proto = match.key->ip_proto; |
1851 | } |
1852 | |
1853 | if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP)) |
1854 | rc = qede_flow_parse_tcp_v4(edev, rule, tuple); |
1855 | else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6)) |
1856 | rc = qede_flow_parse_tcp_v6(edev, rule, tuple); |
1857 | else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP)) |
1858 | rc = qede_flow_parse_udp_v4(edev, rule, tuple); |
1859 | else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6)) |
1860 | rc = qede_flow_parse_udp_v6(edev, rule, tuple); |
1861 | else |
1862 | DP_NOTICE(edev, "Invalid protocol request\n" ); |
1863 | |
1864 | return rc; |
1865 | } |
1866 | |
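/* Entry point for tc-flower offload (TC_SETUP_CLSFLOWER). A rule such
 * as the following (illustrative only; the interface name and values
 * are arbitrary):
 *
 *   tc filter add dev eth0 ingress protocol ip prio 1 flower \
 *       ip_proto tcp dst_port 5001 skip_sw action drop
 *
 * ends up here: the rule is parsed into a qede_arfs_tuple, validated
 * against the currently configured filtering mode, and programmed as
 * a drop filter keyed by the flower cookie.
 */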
1867 | int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, |
1868 | struct flow_cls_offload *f) |
1869 | { |
1870 | struct qede_arfs_fltr_node *n; |
1871 | int min_hlen, rc = -EINVAL; |
1872 | struct qede_arfs_tuple t; |
1873 | |
1874 | __qede_lock(edev); |
1875 | |
1876 | if (!edev->arfs) { |
1877 | rc = -EPERM; |
1878 | goto unlock; |
1879 | } |
1880 | |
1881 | /* parse flower attribute and prepare filter */ |
	if (qede_parse_flow_attr(edev, proto, f->rule, &t))
1883 | goto unlock; |
1884 | |
1885 | /* Validate profile mode and number of filters */ |
1886 | if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) || |
1887 | edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) { |
1888 | DP_NOTICE(edev, |
1889 | "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n" , |
1890 | t.mode, edev->arfs->mode, edev->arfs->filter_count); |
1891 | goto unlock; |
1892 | } |
1893 | |
1894 | /* parse tc actions and get the vf_id */ |
	if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
1896 | goto unlock; |
1897 | |
	if (qede_flow_find_fltr(edev, &t)) {
1899 | rc = -EEXIST; |
1900 | goto unlock; |
1901 | } |
1902 | |
	n = kzalloc(sizeof(*n), GFP_KERNEL);
1904 | if (!n) { |
1905 | rc = -ENOMEM; |
1906 | goto unlock; |
1907 | } |
1908 | |
	min_hlen = qede_flow_get_min_header_size(&t);

	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
1914 | rc = -ENOMEM; |
1915 | goto unlock; |
1916 | } |
1917 | |
1918 | memcpy(&n->tuple, &t, sizeof(n->tuple)); |
1919 | |
1920 | n->buf_len = min_hlen; |
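	/* tc-flower filters are currently programmed as drop filters */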
1921 | n->b_is_drop = true; |
1922 | n->sw_id = f->cookie; |
1923 | |
1924 | n->tuple.build_hdr(&n->tuple, n->data); |
1925 | |
	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
1927 | if (rc) |
1928 | goto unlock; |
1929 | |
	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);
1932 | |
1933 | unlock: |
1934 | __qede_unlock(edev); |
1935 | return rc; |
1936 | } |
1937 | |
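/* Sanity-check an ethtool flow-spec before programming it: the
 * user-supplied location must be in range and free, the tuple's
 * required filtering mode must not conflict with filters already
 * installed, and the requested actions must be supported.
 */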
1938 | static int qede_flow_spec_validate(struct qede_dev *edev, |
1939 | struct flow_action *flow_action, |
1940 | struct qede_arfs_tuple *t, |
1941 | __u32 location) |
1942 | { |
1943 | if (location >= QEDE_RFS_MAX_FLTR) { |
1944 | DP_INFO(edev, "Location out-of-bounds\n" ); |
1945 | return -EINVAL; |
1946 | } |
1947 | |
1948 | /* Check location isn't already in use */ |
1949 | if (test_bit(location, edev->arfs->arfs_fltr_bmap)) { |
1950 | DP_INFO(edev, "Location already in use\n" ); |
1951 | return -EINVAL; |
1952 | } |
1953 | |
1954 | /* Check if the filtering-mode could support the filter */ |
1955 | if (edev->arfs->filter_count && |
1956 | edev->arfs->mode != t->mode) { |
		DP_INFO(edev,
			"flow_spec would require filtering mode %08x, but %08x is configured\n",
			t->mode, edev->arfs->mode);
1960 | return -EINVAL; |
1961 | } |
1962 | |
1963 | if (qede_parse_actions(edev, flow_action, NULL)) |
1964 | return -EINVAL; |
1965 | |
1966 | return 0; |
1967 | } |
1968 | |
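/* Convert an ethtool_rx_flow_spec into a generic flow_rule via
 * ethtool_rx_flow_rule_create(), then reuse the flower parsing path
 * so that ethtool and tc filters share one validation flow.
 */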
1969 | static int qede_flow_spec_to_rule(struct qede_dev *edev, |
1970 | struct qede_arfs_tuple *t, |
1971 | struct ethtool_rx_flow_spec *fs) |
1972 | { |
1973 | struct ethtool_rx_flow_spec_input input = {}; |
1974 | struct ethtool_rx_flow_rule *flow; |
1975 | __be16 proto; |
1976 | int err = 0; |
1977 | |
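	/* Reject flow-spec fields that this driver does not parse;
	 * qede_flow_spec_validate_unused() (defined earlier in this
	 * file) is assumed to check for such unused/unsupported fields.
	 */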
1978 | if (qede_flow_spec_validate_unused(edev, fs)) |
1979 | return -EOPNOTSUPP; |
1980 | |
1981 | switch ((fs->flow_type & ~FLOW_EXT)) { |
1982 | case TCP_V4_FLOW: |
1983 | case UDP_V4_FLOW: |
1984 | proto = htons(ETH_P_IP); |
1985 | break; |
1986 | case TCP_V6_FLOW: |
1987 | case UDP_V6_FLOW: |
1988 | proto = htons(ETH_P_IPV6); |
1989 | break; |
1990 | default: |
1991 | DP_VERBOSE(edev, NETIF_MSG_IFUP, |
1992 | "Can't support flow of type %08x\n" , fs->flow_type); |
1993 | return -EOPNOTSUPP; |
1994 | } |
1995 | |
1996 | input.fs = fs; |
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);
2000 | |
	if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
2002 | err = -EINVAL; |
2003 | goto err_out; |
2004 | } |
2005 | |
2006 | /* Make sure location is valid and filter isn't already set */ |
	err = qede_flow_spec_validate(edev, &flow->rule->action, t,
				      fs->location);
2009 | err_out: |
	ethtool_rx_flow_rule_destroy(flow);
	return err;
}
2014 | |
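/* ethtool n-tuple entry point (ETHTOOL_SRXCLSRLINS). For instance
 * (illustrative only; interface name and values are arbitrary):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 1
 *
 * inserts a rule steering TCP/IPv4 port-80 traffic to RX queue 2 at
 * filter location 1.
 */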
2015 | int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) |
2016 | { |
2017 | struct ethtool_rx_flow_spec *fsp = &info->fs; |
2018 | struct qede_arfs_fltr_node *n; |
2019 | struct qede_arfs_tuple t; |
2020 | int min_hlen, rc; |
2021 | |
2022 | __qede_lock(edev); |
2023 | |
2024 | if (!edev->arfs) { |
2025 | rc = -EPERM; |
2026 | goto unlock; |
2027 | } |
2028 | |
	/* Translate the flow specification into something fitting our DB */
	rc = qede_flow_spec_to_rule(edev, &t, fsp);
2031 | if (rc) |
2032 | goto unlock; |
2033 | |
	if (qede_flow_find_fltr(edev, &t)) {
2035 | rc = -EINVAL; |
2036 | goto unlock; |
2037 | } |
2038 | |
	n = kzalloc(sizeof(*n), GFP_KERNEL);
2040 | if (!n) { |
2041 | rc = -ENOMEM; |
2042 | goto unlock; |
2043 | } |
2044 | |
	min_hlen = qede_flow_get_min_header_size(&t);
	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
2049 | rc = -ENOMEM; |
2050 | goto unlock; |
2051 | } |
2052 | |
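	/* For ethtool filters the rule location doubles as the SW ID
	 * and claims the matching bit in the location bitmap.
	 */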
2053 | n->sw_id = fsp->location; |
	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
2055 | n->buf_len = min_hlen; |
2056 | |
2057 | memcpy(&n->tuple, &t, sizeof(n->tuple)); |
2058 | |
	qede_flow_set_destination(edev, n, fsp);
2060 | |
2061 | /* Build a minimal header according to the flow */ |
2062 | n->tuple.build_hdr(&n->tuple, n->data); |
2063 | |
	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
2065 | if (rc) |
2066 | goto unlock; |
2067 | |
	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);
2070 | unlock: |
2071 | __qede_unlock(edev); |
2072 | |
2073 | return rc; |
2074 | } |
2075 | |