// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	(IP_TUNNEL_INFO_TX | \
						 IP_TUNNEL_INFO_IPV6)
#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS		(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)

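/* MPLS label stack entry layout (RFC 3032), as assembled by the MPLS
 * helpers below: bits 31:12 label, 11:9 traffic class, bit 8
 * bottom-of-stack, bits 7:0 TTL. The MPLS_LS_*_SHIFT and MPLS_LS_*_MASK
 * constants come from <linux/mpls.h>.
 */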
static int
nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
		 const struct flow_action_entry *act,
		 struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_push_mpls);
	u32 mpls_lse = 0;

	push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
	push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* BOS is optional in the TC action but required for offload. */
	if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
		mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
		return -EOPNOTSUPP;
	}

	/* Leave MPLS TC as a default value of 0 if not explicitly set. */
	if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
		mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;

	/* Proto, label and TTL are enforced and verified for MPLS push. */
	mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
	mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
	push_mpls->ethtype = act->mpls_push.proto;
	push_mpls->lse = cpu_to_be32(mpls_lse);

	return 0;
}

static void
nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
		const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_pop_mpls);

	pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
	pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_mpls->ethtype = act->mpls_pop.proto;
}

static void
nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
		const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_set_mpls);
	u32 mpls_lse = 0, mpls_mask = 0;

	set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
	set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
		mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
		mpls_mask |= MPLS_LS_LABEL_MASK;
	}
	if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
		mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
		mpls_mask |= MPLS_LS_TC_MASK;
	}
	if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
		mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
		mpls_mask |= MPLS_LS_S_MASK;
	}
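	/* TC's act_mpls rejects a mangle TTL of 0 at configure time, so a
	 * zero TTL here means "not set" (unlike the fields above, there is
	 * no *_NOT_SET sentinel for TTL).
	 */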
	if (act->mpls_mangle.ttl) {
		mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
		mpls_mask |= MPLS_LS_TTL_MASK;
	}

	set_mpls->lse = cpu_to_be32(mpls_lse);
	set_mpls->lse_mask = cpu_to_be32(mpls_mask);
}

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = act->vlan.proto;

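	/* 802.1Q TCI layout: PCP in bits 15:13, VID in bits 11:0; the DEI
	 * bit (bit 12) is left clear.
	 */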
	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
	       struct nfp_fl_payload *nfp_flow, int act_len,
	       struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = act->dev;
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
		return -EOPNOTSUPP;
	}

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct flow_action_entry *act,
	      struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
	      bool pkt_host, struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = act->dev;
	if (!out_dev) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
		return -EOPNOTSUPP;
	}

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

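	/* The encoding of output->port depends on the egress device:
	 * tunnel netdevs get a tunnel-type port, LAG masters a group id,
	 * internal ports turn the flow into a pre-tunnel rule, and anything
	 * else must be an NFP representor whose port id is used directly.
	 */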
	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
			return -EOPNOTSUPP;
		}

		if (*tun_out_cnt) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
			return -EOPNOTSUPP;
		}
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
			return gid;
		}
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES) &&
		    !(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
			return -EOPNOTSUPP;
		}

		if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
			return -EOPNOTSUPP;
		}

		nfp_flow->pre_tun_rule.dev = out_dev;

		return 0;
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		if (nfp_netdev_is_nfp_repr(in_dev)) {
			/* Confirm ingress and egress are on same device. */
			if (!netdev_port_same_parent_id(in_dev, out_dev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
				return -EOPNOTSUPP;
			}
		}

		if (!nfp_netdev_is_nfp_repr(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
			return -EOPNOTSUPP;
		}

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
			return -EOPNOTSUPP;
		}
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

static bool
nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
{
	struct flow_action_entry *act = rule->action.entries;
	int num_act = rule->action.num_entries;
	int act_idx;

	/* Preparse action list for next mirred or redirect action */
	for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
		if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
		    act[act_idx].id == FLOW_ACTION_MIRRED)
			return netif_is_gretap(act[act_idx].dev) ||
			       netif_is_ip6gretap(act[act_idx].dev);

	return false;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
			struct flow_rule *rule,
			const struct flow_action_entry *act, int act_idx)
{
	const struct ip_tunnel_info *tun = act->tunnel;
	struct nfp_flower_priv *priv = app->priv;

	/* Determine the tunnel type based on the egress netdev
	 * in the mirred action for tunnels without l4.
	 */
	if (nfp_flower_tun_is_gre(rule, act_idx))
		return NFP_FL_TUNNEL_GRE;

	switch (tun->key.tp_dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(GENEVE_UDP_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		fallthrough;
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct flow_action_entry *act,
			   struct netlink_ext_ack *extack)
{
	struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
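	/* Note: options_len is in bytes, while geneve_opt.length counts
	 * 4-byte words of option data excluding the 4-byte option header,
	 * hence the "opt->length * 4" arithmetic below.
	 */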
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
			return -EOPNOTSUPP;
		}

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
			return -EOPNOTSUPP;
		}

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
		return -EOPNOTSUPP;
	}

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

static int
nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
	       const struct flow_action_entry *act,
	       struct nfp_fl_pre_tunnel *pre_tun,
	       enum nfp_flower_tun_type tun_type,
	       struct net_device *netdev, struct netlink_ext_ack *extack)
{
	const struct ip_tunnel_info *ip_tun = act->tunnel;
	bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
	size_t act_size = sizeof(struct nfp_fl_set_tun);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
		return -EOPNOTSUPP;

	if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
		return -EOPNOTSUPP;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	     !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
		return -EOPNOTSUPP;
	}

	if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: loaded firmware does not support tunnel flag offload");
		return -EOPNOTSUPP;
	}

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY)
		set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
#ifdef CONFIG_IPV6
	} else if (ipv6) {
		struct net *net = dev_net(netdev);
		struct flowi6 flow = {};
		struct dst_entry *dst;

		flow.daddr = ip_tun->key.u.ipv6.dst;
		flow.flowi6_proto = IPPROTO_UDP;
		dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
		if (!IS_ERR(dst)) {
			set_tun->ttl = ip6_dst_hoplimit(dst);
			dst_release(dst);
		} else {
			set_tun->ttl = net->ipv6.devconf_all->hop_limit;
		}
#endif
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if fails then use
		 * default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
		}
	}

	set_tun->tos = ip_tun->key.tos;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	if (ipv6) {
		pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
		pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
	} else {
		pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
	}

	return 0;
}

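/* Merge a 32-bit pedit word into an accumulated exact/mask pair: newly
 * masked bits take the new value, previously set bits are preserved and
 * the stored mask grows to cover both. This lets several pedit keys that
 * target the same header word collapse into a single HW action.
 */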
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

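/* pedit hands us 32-bit words. This struct overlays the IPv4 header word
 * that starts at the ttl field (offset 8: ttl, protocol, checksum) so we
 * can check that only the ttl byte is being mangled.
 */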
struct ipv4_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr,
	       struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
	       struct netlink_ext_ack *extack)
{
	struct ipv4_ttl_word *ttl_word_mask;
	struct ipv4_ttl_word *ttl_word;
	struct iphdr *tos_word_mask;
	struct iphdr *tos_word;
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
		return -EOPNOTSUPP;
	}

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, ttl):
		ttl_word_mask = (struct ipv4_ttl_word *)&mask;
		ttl_word = (struct ipv4_ttl_word *)&exact;

		if (ttl_word_mask->protocol || ttl_word_mask->check) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	case round_down(offsetof(struct iphdr, tos), 4):
		tos_word_mask = (struct iphdr *)&mask;
		tos_word = (struct iphdr *)&exact;

		if (tos_word_mask->version || tos_word_mask->ihl ||
		    tos_word_mask->tot_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
		return -EOPNOTSUPP;
	}

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

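/* Overlay for the IPv6 header word at offset 4 (payload_len, nexthdr,
 * hop_limit), analogous to ipv4_ttl_word above.
 */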
struct ipv6_hop_limit_word {
	__be16	payload_len;
	u8	nexthdr;
	u8	hop_limit;
};

static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
				    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
				    struct netlink_ext_ack *extack)
{
	struct ipv6_hop_limit_word *fl_hl_mask;
	struct ipv6_hop_limit_word *fl_hl;

	switch (off) {
	case offsetof(struct ipv6hdr, payload_len):
		fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
		fl_hl = (struct ipv6_hop_limit_word *)&exact;

		if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
					    fl_hl_mask->hop_limit;
		break;
	case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
		if (mask & ~IPV6_FLOWINFO_MASK ||
		    exact & ~IPV6_FLOWINFO_MASK) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow info action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_label_mask |= mask;
		ip_hl_fl->ipv6_label &= ~mask;
		ip_hl_fl->ipv6_label |= exact & mask;
		break;
	}

	ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
	ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src,
	       struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
	       struct netlink_ext_ack *extack)
{
	__be32 exact, mask;
	int err = 0;
	u8 word;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
		return -EOPNOTSUPP;
	}

	if (off < offsetof(struct ipv6hdr, saddr)) {
		err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
							  ip_hl_fl, extack);
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		   sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
		return -EOPNOTSUPP;
	}

	return err;
}

static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode,
		 struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match;
		 * both TCP and UDP will be updated if encountered.
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

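/* Scratch space for one run of consecutive pedit (mangle) actions.
 * Each action is folded into the matching member as it is parsed, and
 * the whole set is flushed to the action list by nfp_fl_commit_mangle()
 * when the run ends (see nfp_fl_check_mangle_start/end below).
 */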
struct nfp_flower_pedit_acts {
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
};

static int
nfp_fl_commit_mangle(struct flow_rule *rule, char *nfp_action,
		     int *a_len, struct nfp_flower_pedit_acts *set_act,
		     u32 *csum_updated)
{
	size_t act_size = 0;
	u8 ip_proto = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;
	}

	if (set_act->set_eth.head.len_lw) {
		act_size = sizeof(set_act->set_eth);
		memcpy(nfp_action, &set_act->set_eth, act_size);
		*a_len += act_size;
	}

	if (set_act->set_ip_ttl_tos.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_ttl_tos);
		memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_addr);
		memcpy(nfp_action, &set_act->set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_tc_hl_fl);
		memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_dst.head.len_lw &&
	    set_act->set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action;
		 * the hardware requires this to be 2 separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
		       &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_act->set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_tport);
		memcpy(nfp_action, &set_act->set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}

static int
nfp_fl_pedit(const struct flow_action_entry *act,
	     char *nfp_action, int *a_len,
	     u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
	     struct netlink_ext_ack *extack)
{
	enum flow_action_mangle_base htype;
	u32 offset;

	htype = act->mangle.htype;
	offset = act->mangle.offset;

	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
				      &set_act->set_ip_ttl_tos, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
				      &set_act->set_ip6_src,
				      &set_act->set_ip6_tc_hl_fl, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_TCP, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_UDP, extack);
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
		return -EOPNOTSUPP;
	}
}

static struct nfp_fl_meter *nfp_fl_meter(char *act_data)
{
	size_t act_size = sizeof(struct nfp_fl_meter);
	struct nfp_fl_meter *meter_act;

	meter_act = (struct nfp_fl_meter *)act_data;

	memset(meter_act, 0, act_size);

	meter_act->head.jump_id = NFP_FL_ACTION_OPCODE_METER;
	meter_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return meter_act;
}

static int
nfp_flower_meter_action(struct nfp_app *app,
			const struct flow_action_entry *action,
			struct nfp_fl_payload *nfp_fl, int *a_len,
			struct net_device *netdev,
			struct netlink_ext_ack *extack)
{
	struct nfp_fl_meter *fl_meter;
	u32 meter_id;

	if (*a_len + sizeof(struct nfp_fl_meter) > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: meter action size beyond the allowed maximum");
		return -EOPNOTSUPP;
	}

	meter_id = action->hw_index;
	if (!nfp_flower_search_meter_entry(app, meter_id)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "cannot offload flow table with unsupported police action");
		return -EOPNOTSUPP;
	}

	fl_meter = nfp_fl_meter(&nfp_fl->action_data[*a_len]);
	*a_len += sizeof(struct nfp_fl_meter);
	fl_meter->meter_id = cpu_to_be32(meter_id);

	return 0;
}

static int
nfp_flower_output_action(struct nfp_app *app,
			 const struct flow_action_entry *act,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated, bool pkt_host,
			 struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
		return -EOPNOTSUPP;
	}

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
		return -EOPNOTSUPP;
	}

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt, pkt_host, extack);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		/* nfp_fl_pre_lag returns -err or size of prelag action added.
		 * This will be 0 if it is not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
		if (prelag_size < 0) {
			return prelag_size;
		} else if (prelag_size > 0 && (!last || *out_cnt)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
			return -EOPNOTSUPP;
		}

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
		       struct flow_rule *rule,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated,
		       struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
		       struct netlink_ext_ack *extack, int act_idx)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_set_tun *set_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_push_mpls *psh_m;
	struct nfp_fl_pop_vlan *pop_v;
	struct nfp_fl_pop_mpls *pop_m;
	struct nfp_fl_set_mpls *set_m;
	int err;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
		break;
	case FLOW_ACTION_REDIRECT_INGRESS:
	case FLOW_ACTION_REDIRECT:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated, *pkt_host,
					       extack);
		if (err)
			return err;
		break;
	case FLOW_ACTION_MIRRED_INGRESS:
	case FLOW_ACTION_MIRRED:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated, *pkt_host,
					       extack);
		if (err)
			return err;
		break;
	case FLOW_ACTION_VLAN_POP:
		if (*a_len +
		    sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
			return -EOPNOTSUPP;
		}

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
		break;
	case FLOW_ACTION_VLAN_PUSH:
		if (*a_len +
		    sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
			return -EOPNOTSUPP;
		}

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, act);
		*a_len += sizeof(struct nfp_fl_push_vlan);
		break;
	case FLOW_ACTION_TUNNEL_ENCAP: {
		const struct ip_tunnel_info *ip_tun = act->tunnel;

		*tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx);
		if (*tun_type == NFP_FL_TUNNEL_NONE) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
			return -EOPNOTSUPP;
		}

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
			return -EOPNOTSUPP;
		}

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
			return -EOPNOTSUPP;
		}

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
				     netdev, extack);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_tun);
	}
		break;
	case FLOW_ACTION_TUNNEL_DECAP:
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	case FLOW_ACTION_MANGLE:
		if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated, set_act, extack))
			return -EOPNOTSUPP;
		break;
	case FLOW_ACTION_CSUM:
		/* csum action requests recalc of something we have not fixed */
		if (act->csum_flags & ~*csum_updated) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
			return -EOPNOTSUPP;
		}
		/* If we will correctly fix the csum we can remove it from the
		 * csum update list, which will later be used to check support.
		 */
		*csum_updated &= ~act->csum_flags;
		break;
	case FLOW_ACTION_MPLS_PUSH:
		if (*a_len +
		    sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
			return -EOPNOTSUPP;
		}

		psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		err = nfp_fl_push_mpls(psh_m, act, extack);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_push_mpls);
		break;
	case FLOW_ACTION_MPLS_POP:
		if (*a_len +
		    sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
			return -EOPNOTSUPP;
		}

		pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_pop_mpls(pop_m, act);
		*a_len += sizeof(struct nfp_fl_pop_mpls);
		break;
	case FLOW_ACTION_MPLS_MANGLE:
		if (*a_len +
		    sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
			return -EOPNOTSUPP;
		}

		set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_set_mpls(set_m, act);
		*a_len += sizeof(struct nfp_fl_set_mpls);
		break;
	case FLOW_ACTION_PTYPE:
		/* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
		if (act->ptype != PACKET_HOST)
			return -EOPNOTSUPP;

		*pkt_host = true;
		break;
	case FLOW_ACTION_POLICE:
		if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: unsupported police action in action list");
			return -EOPNOTSUPP;
		}

		err = nfp_flower_meter_action(app, act, nfp_fl, a_len, netdev,
					      extack);
		if (err)
			return err;
		break;
	default:
		/* Currently we do not handle any other actions. */
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
		return -EOPNOTSUPP;
	}

	return 0;
}

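/* Consecutive FLOW_ACTION_MANGLE entries are coalesced into a single set
 * of hardware actions. These two helpers mark where such a run starts
 * and ends within the flow_action array.
 */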
static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
				      int current_act_idx)
{
	struct flow_action_entry current_act;
	struct flow_action_entry prev_act;

	current_act = flow_act->entries[current_act_idx];
	if (current_act.id != FLOW_ACTION_MANGLE)
		return false;

	if (current_act_idx == 0)
		return true;

	prev_act = flow_act->entries[current_act_idx - 1];

	return prev_act.id != FLOW_ACTION_MANGLE;
}

static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
				    int current_act_idx)
{
	struct flow_action_entry current_act;
	struct flow_action_entry next_act;

	current_act = flow_act->entries[current_act_idx];
	if (current_act.id != FLOW_ACTION_MANGLE)
		return false;

	if (current_act_idx == flow_act->num_entries)
		return true;

	next_act = flow_act->entries[current_act_idx + 1];

	return next_act.id != FLOW_ACTION_MANGLE;
}

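/* Entry point for action compilation. As an illustrative (hypothetical)
 * example, a rule such as:
 *
 *   tc filter add dev <repr0> ingress protocol ip flower \
 *           action vlan pop \
 *           action mirred egress redirect dev <repr1>
 *
 * arrives here as a flow_rule whose action entries are compiled one by
 * one into nfp_flow->action_data by nfp_flower_loop_action() above.
 */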
int nfp_flower_compile_action(struct nfp_app *app,
			      struct flow_rule *rule,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow,
			      struct netlink_ext_ack *extack)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	struct nfp_flower_pedit_acts set_act;
	enum nfp_flower_tun_type tun_type;
	struct flow_action_entry *act;
	bool pkt_host = false;
	u32 csum_updated = 0;

	if (!flow_action_hw_stats_check(&rule->action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	flow_action_for_each(i, act, &rule->action) {
		if (nfp_fl_check_mangle_start(&rule->action, i))
			memset(&set_act, 0, sizeof(set_act));
		err = nfp_flower_loop_action(app, act, rule, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated,
					     &set_act, &pkt_host, extack, i);
		if (err)
			return err;
		act_cnt++;
		if (nfp_fl_check_mangle_end(&rule->action, i))
			nfp_fl_commit_mangle(rule,
					     &nfp_flow->action_data[act_len],
					     &act_len, &set_act, &csum_updated);
	}

	/* We optimise when the action list is small; this can unfortunately
	 * not happen once we have more than one action in the action list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}