// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_CT) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_META) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
	(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
	(NFP_FLOWER_LAYER_EXT_META | \
	 NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

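/* Subset of match fields inspected when validating a flow merge; the
 * union lets the same bytes be scanned as a bitmap (vals) so merge
 * checks can use the bitmap helpers.
 */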
struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};

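/* Send a rule to firmware as a single control message laid out as
 * metadata, match key, mask, then actions.
 */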
int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
{
	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
{
	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

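/* Account for geneve option TLVs in the key layout, rejecting option
 * lengths the firmware cannot store.
 */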
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size, bool ipv6,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
	    (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

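/* Identify a UDP tunnel by its destination port (VXLAN or geneve) and
 * extend the key layers and key size to carry the tunnel match.
 */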
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
			      struct flow_dissector_key_enc_opts *enc_op,
			      u32 *key_layer_two, u8 *key_layer, int *key_size,
			      struct nfp_flower_priv *priv,
			      enum nfp_flower_tun_type *tun_type, bool ipv6,
			      struct netlink_ext_ack *extack)
{
	int err;

	switch (enc_ports->dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;

		if (ipv6) {
			*key_layer |= NFP_FLOWER_LAYER_EXT_META;
			*key_size += sizeof(struct nfp_flower_ext_meta);
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		if (enc_op) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
			return -EOPNOTSUPP;
		}
		break;
	case htons(GENEVE_UDP_PORT):
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
			return -EOPNOTSUPP;
		}
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_size += sizeof(struct nfp_flower_ext_meta);
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

		if (ipv6) {
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		if (!enc_op)
			break;
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
			return -EOPNOTSUPP;
		}
		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
						ipv6, extack);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
		return -EOPNOTSUPP;
	}

	return 0;
}

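/* Walk the dissector keys used by a rule and accumulate the firmware
 * key layers and total key size needed to offload it; unsupported
 * matches fail with an extack message.
 */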
int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct flow_rule *rule,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
		if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
		    !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_size += sizeof(struct nfp_flower_vlan);
			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan cvlan;

		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
			return -EOPNOTSUPP;
		}

		flow_rule_match_vlan(rule, &cvlan);
		if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_size += sizeof(struct nfp_flower_vlan);
			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_ipv6_addrs ipv6_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;
		bool ipv6_tun = false;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}

		ipv6_tun = enc_ctl.key->addr_type ==
			   FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (ipv6_tun &&
		    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
			return -EOPNOTSUPP;
		}

		if (!ipv6_tun &&
		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
			return -EOPNOTSUPP;
		}

		if (ipv6_tun) {
			flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
			if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
				       sizeof(ipv6_addrs.mask->dst))) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
			if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
				return -EOPNOTSUPP;
			}
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
			/* Check if GRE, which has no enc_ports */
			if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
				return -EOPNOTSUPP;
			}

			*tun_type = NFP_FL_TUNNEL_GRE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GRE;

			if (ipv6_tun) {
				key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
				key_size +=
					sizeof(struct nfp_flower_ipv6_gre_tun);
			} else {
				key_size +=
					sizeof(struct nfp_flower_ipv4_gre_tun);
			}

			if (enc_op.key) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ports(rule, &enc_ports);
			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
				return -EOPNOTSUPP;
			}

			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
							    enc_op.key,
							    &key_layer_two,
							    &key_layer,
							    &key_size, priv,
							    tun_type, ipv6_tun,
							    extack);
			if (err)
				return err;

			/* Ensure the ingress netdev matches the expected
			 * tun type.
			 */
			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
			return -EOPNOTSUPP;
		}
	} else if (nfp_flower_check_higher_than_mac(rule)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
		return -EOPNOTSUPP;
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		}
	}

	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
	    nfp_flower_check_higher_than_l3(rule)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->nfp_tun_ipv6 = NULL;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;
	flow_pay->pre_tun_rule.dev = NULL;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

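/* OR the mask of each field rewritten by a flow's actions into the
 * merge check structure, so that fields set by actions count as
 * matched when validating a merge.
 */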
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	bool ipv6_tun = false;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			if (ipv6_tun)
				memset(&merge->ipv6, 0xff,
				       sizeof(struct nfp_flower_ipv6));
			else
				memset(&merge->ipv4, 0xff,
				       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			pre_tun = (struct nfp_fl_pre_tunnel *)a;
			ipv6_tun = be16_to_cpu(pre_tun->flags) &
					NFP_FL_PRE_TUN_IPV6;
			break;
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}

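/* Extract the mask of a sub_flow into the merge check layout; key
 * layers outside NFP_FLOWER_MERGE_FIELDS are only tolerated when the
 * caller allows extra fields.
 */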
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}

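/* Copy any leading pre-actions (pre-tunnel, pre-LAG) from act_src to
 * act_dst and return the number of bytes copied; pre-actions always
 * sit at the head of an action list.
 */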
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			fallthrough;
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

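/* Validate that actions following a tunnel push consist of at most one
 * leading VLAN push followed only by output actions.
 */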
static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
			*vlan = (struct nfp_fl_push_vlan *)a;
		else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Ensure any VLAN push also has an egress action. */
	if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
		return -EOPNOTSUPP;

	return 0;
}

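/* Fold a post-tunnel VLAN push into the tunnel set action so the VLAN
 * is applied to the new outer header.
 */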
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
	struct nfp_fl_set_tun *tun;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
			tun = (struct nfp_fl_set_tun *)a;
			tun->outer_vlan_tpid = vlan->vlan_tpid;
			tun->outer_vlan_tci = vlan->vlan_tci;

			return 0;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Return error if no tunnel action is found. */
	return -EOPNOTSUPP;
}

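/* Build the merged action list: pre-actions from both sub_flows first,
 * then sub_flow1's remaining actions minus its final output, then
 * sub_flow2's remaining actions.
 */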
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
	 * valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
						  &post_tun_push_vlan);
		if (err)
			return err;

		if (post_tun_push_vlan) {
			pre_off2 += sizeof(*post_tun_push_vlan);
			sub2_act_len -= sizeof(*post_tun_push_vlan);
		}
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

	if (post_tun_push_vlan) {
		/* Update tunnel action in merge to include VLAN push. */
		err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
						 post_tun_push_vlan);
		if (err)
			return err;

		merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
	}

	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app: Pointer to the APP handle
 * @sub_flow1: Initial flow matched to produce merge hint
 * @sub_flow2: Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	struct nfp_merge_info *merge_info;
	u64 parent_ctx = 0;
	int err;

	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	/* Check if the two flows are already merged */
	parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
	parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
	if (rhashtable_lookup_fast(&priv->merge_table,
				   &parent_ctx, merge_table_params)) {
		nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
		return 0;
	}

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
					merge_flow->ingress_dev, NULL);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
	if (!merge_info) {
		err = -ENOMEM;
		goto err_remove_rhash;
	}
	merge_info->parent_ctx = parent_ctx;
	err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
				     merge_table_params);
	if (err)
		goto err_destroy_merge_info;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_merge_info;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_merge_info:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
					    &merge_info->ht_node,
					    merge_table_params));
err_destroy_merge_info:
	kfree(merge_info);
err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_validate_pre_tun_rule() - Verify a flow as a pre-tunnel rule.
 * @app: Pointer to the APP handle
 * @flow: Pointer to NFP flow representation of rule
 * @key_ls: Pointer to NFP key layers structure
 * @extack: Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
				 struct nfp_fl_payload *flow,
				 struct nfp_fl_key_ls *key_ls,
				 struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_meta_tci *meta_tci;
	struct nfp_flower_mac_mpls *mac;
	u8 *ext = flow->unmasked_data;
	struct nfp_fl_act_head *act;
	u8 *mask = flow->mask_data;
	bool vlan = false;
	int act_offset;
	u8 key_layer;

	meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	key_layer = key_ls->key_layer;
	if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
		if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
			u16 vlan_tci = be16_to_cpu(meta_tci->tci);

			vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
			vlan = true;
		} else {
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
		}
	}

	if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
		return -EOPNOTSUPP;
	} else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
		return -EOPNOTSUPP;
	}

	if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
		return -EOPNOTSUPP;
	}

	if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
	    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
		return -EOPNOTSUPP;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6)
		flow->pre_tun_rule.is_ipv6 = true;
	else
		flow->pre_tun_rule.is_ipv6 = false;

	/* Skip fields known to exist. */
	mask += sizeof(struct nfp_flower_meta_tci);
	ext += sizeof(struct nfp_flower_meta_tci);
	if (key_ls->key_layer_two) {
		mask += sizeof(struct nfp_flower_ext_meta);
		ext += sizeof(struct nfp_flower_ext_meta);
	}
	mask += sizeof(struct nfp_flower_in_port);
	ext += sizeof(struct nfp_flower_in_port);

	/* Ensure destination MAC address is fully matched. */
	mac = (struct nfp_flower_mac_mpls *)mask;
	if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
		return -EOPNOTSUPP;
	}

	/* Ensure source MAC address is fully matched. This is only needed
	 * for firmware with the DECAP_V2 feature enabled. Don't do this
	 * for firmware without this feature to keep old behaviour.
	 */
	if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
		mac = (struct nfp_flower_mac_mpls *)mask;
		if (!is_broadcast_ether_addr(&mac->mac_src[0])) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported pre-tunnel rule: source MAC field must not be masked");
			return -EOPNOTSUPP;
		}
	}

	if (mac->mpls_lse) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
		return -EOPNOTSUPP;
	}

	/* Ensure destination MAC address matches pre_tun_dev. */
	mac = (struct nfp_flower_mac_mpls *)ext;
	if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
		return -EOPNOTSUPP;
	}

	/* Save mac addresses in pre_tun_rule entry for later use */
	memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN);
	memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN);

	mask += sizeof(struct nfp_flower_mac_mpls);
	ext += sizeof(struct nfp_flower_mac_mpls);
	if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
	    key_layer & NFP_FLOWER_LAYER_IPV6) {
		/* Flags and proto fields have same offset in IPv4 and IPv6. */
		int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
		int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
		int size;
		int i;

		size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
		       sizeof(struct nfp_flower_ipv4) :
		       sizeof(struct nfp_flower_ipv6);

		/* Ensure proto and flags are the only IP layer fields. */
		for (i = 0; i < size; i++)
			if (mask[i] && i != ip_flags && i != ip_proto) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
				return -EOPNOTSUPP;
			}
		ext += size;
		mask += size;
	}

	if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
			struct nfp_flower_vlan *vlan_tags;
			u16 vlan_tpid;
			u16 vlan_tci;

			vlan_tags = (struct nfp_flower_vlan *)ext;

			vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
			vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid);

			vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
			flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid);
			vlan = true;
		} else {
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
			flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff);
		}
	}

	/* Action must be a single egress or pop_vlan and egress. */
	act_offset = 0;
	act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	if (vlan) {
		if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
			return -EOPNOTSUPP;
		}

		act_offset += act->len_lw << NFP_FL_LW_SIZ;
		act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	}

	if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
		return -EOPNOTSUPP;
	}

	act_offset += act->len_lw << NFP_FL_LW_SIZ;

	/* Ensure there are no more actions after egress. */
	if (act_offset != flow->meta.act_len) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
		return -EOPNOTSUPP;
	}

	return 0;
}

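/* Generic checks applied before any offload attempt: only an all-zero
 * conntrack match is accepted and the rule must be on chain 0.
 */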
static bool offload_pre_check(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_ct ct;

	if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		/* Allow special case where CT match is all 0 */
		if (memchr_inv(ct.key, 0, sizeof(*ct.key)))
			return false;
	}

	if (flow->common.chain_index)
		return false;

	return true;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	if (is_pre_ct_flow(flow))
		return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack, NULL);

	if (is_post_ct_flow(flow))
		return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);

	if (!offload_pre_check(flow))
		return -EOPNOTSUPP;

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	if (flow_pay->pre_tun_rule.dev) {
		err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
		if (err)
			goto err_destroy_flow;
	}

	err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	if (flow_pay->pre_tun_rule.dev) {
		if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
			struct nfp_predt_entry *predt;

			predt = kzalloc(sizeof(*predt), GFP_KERNEL);
			if (!predt) {
				err = -ENOMEM;
				goto err_remove_rhash;
			}
			predt->flow_pay = flow_pay;
			INIT_LIST_HEAD(&predt->nn_list);
			spin_lock_bh(&priv->predt_lock);
			list_add(&predt->list_head, &priv->predt_list);
			flow_pay->pre_tun_rule.predt = predt;
			nfp_tun_link_and_update_nn_entries(app, predt);
			spin_unlock_bh(&priv->predt_lock);
		} else {
			err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
		}
	} else {
		err = nfp_flower_xmit_flow(app, flow_pay,
					   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	}

	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_merge_info *merge_info;
	struct nfp_fl_payload *origin;
	u64 parent_ctx = 0;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list) {
		u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);

		parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
		nfp_flower_unlink_flow(link);
	}

	merge_info = rhashtable_lookup_fast(&priv->merge_table,
					    &parent_ctx,
					    merge_table_params);
	if (merge_info) {
		WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
						    &merge_info->ht_node,
						    merge_table_params));
		kfree(merge_info);
	}

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	/* Check ct_map_table */
	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
					    nfp_ct_map_params);
	if (ct_map_ent) {
		err = nfp_fl_ct_del_flow(ct_map_ent);
		return err;
	}

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (nfp_flow->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	if (nfp_flow->pre_tun_rule.dev) {
		if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
			struct nfp_predt_entry *predt;

			predt = nfp_flow->pre_tun_rule.predt;
			if (predt) {
				spin_lock_bh(&priv->predt_lock);
				nfp_tun_unlink_and_update_nn_entries(app, predt);
				list_del(&predt->list_head);
				spin_unlock_bh(&priv->predt_lock);
				kfree(predt);
			}
		} else {
			err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
		}
	} else {
		err = nfp_flower_xmit_flow(app, nfp_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	}
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}
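
/* Example trigger (illustrative): deleting an offloaded filter, e.g.
 *
 *	# tc filter del dev <repr> ingress protocol ip pref 1 flower
 *
 * arrives as a FLOW_CLS_DESTROY command and is dispatched to the function
 * above from nfp_flower_repr_offload() below.
 */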

static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
	}
}
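
/* Worked example (illustrative): if firmware reports 10 packets / 1500
 * bytes against the merge flow's context, every sub-flow context is
 * credited with the full 10 packets and 1500 bytes, the newest 'used'
 * timestamp wins, and the merge flow's own counters are zeroed so the
 * same update is never handed out twice.
 */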

void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app: Pointer to the APP handle
 * @netdev: Netdev structure.
 * @flow: TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	/* Check ct_map table first */
	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
					    nfp_ct_map_params);
	if (ct_map_ent)
		return nfp_fl_ct_stats(flow, ct_map_ent);

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
			  FLOW_ACTION_HW_STATS_DELAYED);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}
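
/* Note (illustrative): the counters above are read-and-reset deltas. Each
 * FLOW_CLS_STATS request drains priv->stats[ctx_id] into the flow_stats
 * structure, and TC accumulates the running totals, visible with e.g.:
 *
 *	# tc -s filter show dev <repr> ingress
 */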

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct flow_cls_offload *flower)
{
	struct nfp_flower_priv *priv = app->priv;
	int ret;

	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	mutex_lock(&priv->nfp_fl_lock);
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		ret = nfp_flower_add_offload(app, netdev, flower);
		break;
	case FLOW_CLS_DESTROY:
		ret = nfp_flower_del_offload(app, netdev, flower);
		break;
	case FLOW_CLS_STATS:
		ret = nfp_flower_get_stats(app, netdev, flower);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&priv->nfp_fl_lock);

	return ret;
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct flow_cls_common_offload *common = type_data;
	struct nfp_repr *repr = cb_priv;

	if (!tc_can_offload_extack(repr->netdev, common->extack))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(nfp_block_cb_list);

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = f->block_shared;
	f->driver_block_list = &nfp_block_cb_list;
	f->unlocked_driver_cb = true;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
					  &nfp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
					       repr, repr, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_tc_block_cb,
						repr);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
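
/* Example trigger (illustrative): attaching a clsact qdisc to a representor
 * binds its ingress block and lands here with FLOW_BLOCK_BIND:
 *
 *	# tc qdisc add dev <repr> clsact
 *
 * Deleting the qdisc generates the matching FLOW_BLOCK_UNBIND.
 */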

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
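
/* Wiring note (illustrative): in the flower app definition this function is
 * installed as the app's .setup_tc callback, so TC_SETUP_BLOCK requests from
 * representor netdevs are routed here through the nfp_app layer.
 */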

struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	list_del(&priv->list);
	kfree(priv);
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
			       struct flow_block_offload *f, void *data,
			       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	struct flow_block_cb *block_cb;

	/* Ordinary netdevs must bind their ingress block; netdevs handled as
	 * internal ports must bind their egress block.
	 */
	if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	     !nfp_flower_internal_port_can_offload(app, netdev)) ||
	    (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	     nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	f->unlocked_driver_cb = true;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (cb_priv &&
		    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
					  cb_priv,
					  &nfp_block_cb_list))
			return -EBUSY;

		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
						    cb_priv, cb_priv,
						    nfp_flower_setup_indr_tc_release,
						    f, netdev, sch, data, app, cleanup);
		if (IS_ERR(block_cb)) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_indr_block_cb,
						cb_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
{
	if (!data)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_ACT:
		return nfp_setup_tc_act_offload(app, data);
	default:
		return -EOPNOTSUPP;
	}
}

int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
			    enum tc_setup_type type, void *type_data,
			    void *data,
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	if (!netdev)
		return nfp_setup_tc_no_dev(cb_priv, type, data);

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
						      type_data, data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}
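
/* Registration sketch (illustrative): the indirect callback above only fires
 * once it has been registered with the flow offload core, which the flower
 * app does at init time, roughly:
 *
 *	err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
 *	...
 *	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
 *				 nfp_flower_setup_indr_tc_release);
 *
 * This is what allows rules installed on foreign netdevs (e.g. tunnel
 * devices) to be offloaded to the NFP.
 */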