// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_protocol_type.h"

#define ICE_TC_METADATA_LKUP_IDX 0

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
	int lkups_cnt = 1;	/* 0th lookup is metadata */

	/* Always add metadata as the 0th lookup. Included elements:
	 * - Direction flag (always present)
	 * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
	 * - Tunnel flag (present if tunnel)
	 */
	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
		     ICE_TC_FLWR_FIELD_ENC_IP_TTL))
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
		lkups_cnt++;

	/* is CVLAN specified? */
	if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
		lkups_cnt++;

	/* are PPPoE options specified? */
	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO))
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
		lkups_cnt++;

	/* are L2TPv3 options specified? */
	if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
		lkups_cnt++;

	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}
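
/* NB: ice_tc_fill_rules() must populate exactly as many lookup elements as
 * counted here; both callers treat a count/fill mismatch as -EINVAL.
 */

/* Helpers mapping a lookup to an ice protocol ID: the *_OFOS variants refer
 * to the outermost header of a packet, the *_IL variants to the inner header
 * of a tunneled packet.
 */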
static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
{
	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
{
	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
}

static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
{
	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_TCP:
		return ICE_TCP_IL;
	case IPPROTO_UDP:
		return ICE_UDP_ILOS;
	}

	return 0;
}

static enum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_VXLAN;
	case TNL_GENEVE:
		return ICE_GENEVE;
	case TNL_GRETAP:
		return ICE_NVGRE;
	case TNL_GTPU:
		/* NO_PAY profiles will not work with GTP-U */
		return ICE_GTP;
	case TNL_GTPC:
		return ICE_GTP_NO_PAY;
	default:
		return 0;
	}
}

static enum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_SW_TUN_VXLAN;
	case TNL_GENEVE:
		return ICE_SW_TUN_GENEVE;
	case TNL_GRETAP:
		return ICE_SW_TUN_NVGRE;
	case TNL_GTPU:
		return ICE_SW_TUN_GTPU;
	case TNL_GTPC:
		return ICE_SW_TUN_GTPC;
	default:
		return ICE_NON_TUN;
	}
}

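/**
 * ice_check_supported_vlan_tpid - check for supported VLAN TPID
 * @vlan_tpid: VLAN TPID to check
 *
 * Return: @vlan_tpid if it is one of the supported TPIDs (802.1Q, 802.1ad,
 * QinQ), otherwise 0.
 */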
static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
{
	switch (vlan_tpid) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
	case ETH_P_QINQ1:
		return vlan_tpid;
	default:
		return 0;
	}
}

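/**
 * ice_tc_fill_tunnel_outer - fill lookups for the outer (tunnel) headers
 * @flags: TC-flower flags
 * @fltr: Pointer to TC flower filter structure
 * @list: list of advanced rule lookup elements
 * @i: index of the first free element in @list
 *
 * Fill @list with the outer-header lookups required by a tunnel filter and
 * mark the rule as tunneled in the metadata lookup.
 *
 * Return: index of the next free element in @list.
 */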
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
			 struct ice_adv_lkup_elem *list, int i)
{
	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
		u32 tenant_id;

		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
		switch (fltr->tunnel_type) {
		case TNL_VXLAN:
		case TNL_GENEVE:
			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
			i++;
			break;
		case TNL_GRETAP:
			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
			memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		case TNL_GTPC:
		case TNL_GTPU:
			list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
			memcpy(&list[i].m_u.gtp_hdr.teid,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		default:
			break;
		}
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
		list[i].type = ice_proto_type_from_mac(false);
		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
				hdr->l2_key.dst_mac);
		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
				hdr->l2_mask.dst_mac);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);

		if (fltr->gtp_pdu_info_masks.pdu_type) {
			list[i].h_u.gtp_hdr.pdu_type =
				fltr->gtp_pdu_info_keys.pdu_type << 4;
			memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
		}

		if (fltr->gtp_pdu_info_masks.qfi) {
			list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
			memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
		}

		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
		}
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
			       &hdr->l3_key.src_ipv6_addr,
			       sizeof(hdr->l3_key.src_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
			       &hdr->l3_mask.src_ipv6_addr,
			       sizeof(hdr->l3_mask.src_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
			       &hdr->l3_key.dst_ipv6_addr,
			       sizeof(hdr->l3_key.dst_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
			       &hdr->l3_mask.dst_ipv6_addr,
			       sizeof(hdr->l3_mask.dst_ipv6_addr));
		}
		i++;
	}

	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
			list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
			list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
		}

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
			list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
			list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
		}

		i++;
	}

	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
		struct ice_ipv6_hdr *hdr_h, *hdr_m;

		hdr_h = &list[i].h_u.ipv6_hdr;
		hdr_m = &list[i].m_u.ipv6_hdr;
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
					   hdr->l3_key.tos,
					   ICE_IPV6_HDR_TC_MASK);
			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
					   hdr->l3_mask.tos,
					   ICE_IPV6_HDR_TC_MASK);
		}

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
			hdr_h->hop_limit = hdr->l3_key.ttl;
			hdr_m->hop_limit = hdr->l3_mask.ttl;
		}

		i++;
	}

	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
		list[i].type = ICE_UDP_OF;
		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
		i++;
	}

	/* always fill matching on tunneled packets in metadata */
	ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);

	return i;
}

/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advanced rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add an
 * advanced filter in hardware.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	bool inner = false;
	u16 vlan_tpid = 0;
	int i = 1;	/* 0th lookup is metadata */

	rule_info->vlan_type = vlan_tpid;

	/* Always add direction metadata */
	ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);

	if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
		ice_rule_add_src_vsi_metadata(&list[i]);
		i++;
	}

	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
	if (tc_fltr->tunnel_type != TNL_LAST) {
		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);

		headers = &tc_fltr->inner_headers;
		inner = true;
	}

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ice_proto_type_from_etype(inner);
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ice_proto_type_from_mac(inner);
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info */
	if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
		if (flags & ICE_TC_FLWR_FIELD_CVLAN)
			list[i].type = ICE_VLAN_EX;
		else
			list[i].type = ICE_VLAN_OFOS;

		if (flags & ICE_TC_FLWR_FIELD_VLAN) {
			list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
			list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
		}

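		/* 802.1Q TCI layout: 0x0FFF masks the 12-bit VID, 0xE000 the
		 * 3-bit PCP, and 0xEFFF both at once (only the DEI bit is
		 * left unmasked).
		 */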
		if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
			if (flags & ICE_TC_FLWR_FIELD_VLAN) {
				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
			} else {
				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
				list[i].h_u.vlan_hdr.vlan = 0;
			}
			list[i].h_u.vlan_hdr.vlan |=
				headers->vlan_hdr.vlan_prio;
		}

		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) {
		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
		rule_info->vlan_type =
			ice_check_supported_vlan_tpid(vlan_tpid);

		ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
	}

	if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
		list[i].type = ICE_VLAN_IN;

		if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
			list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
			list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
		}

		if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
			if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
			} else {
				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
				list[i].h_u.vlan_hdr.vlan = 0;
			}
			list[i].h_u.vlan_hdr.vlan |=
				headers->cvlan_hdr.vlan_prio;
		}

		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO)) {
		struct ice_pppoe_hdr *vals, *masks;

		vals = &list[i].h_u.pppoe_hdr;
		masks = &list[i].m_u.pppoe_hdr;

		list[i].type = ICE_PPPOE;

		if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
			vals->session_id = headers->pppoe_hdr.session_id;
			masks->session_id = cpu_to_be16(0xFFFF);
		}

		if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
			vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
			masks->ppp_prot_id = cpu_to_be16(0xFFFF);
		}

		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv4(inner);
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv6(inner);
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
		list[i].type = ice_proto_type_from_ipv4(inner);

		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
			list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
			list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
		}

		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
			list[i].h_u.ipv4_hdr.time_to_live =
				headers->l3_key.ttl;
			list[i].m_u.ipv4_hdr.time_to_live =
				headers->l3_mask.ttl;
		}

		i++;
	}

	if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
		struct ice_ipv6_hdr *hdr_h, *hdr_m;

		hdr_h = &list[i].h_u.ipv6_hdr;
		hdr_m = &list[i].m_u.ipv6_hdr;
		list[i].type = ice_proto_type_from_ipv6(inner);

		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
					   headers->l3_key.tos,
					   ICE_IPV6_HDR_TC_MASK);
			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
					   headers->l3_mask.tos,
					   ICE_IPV6_HDR_TC_MASK);
		}

		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
			hdr_h->hop_limit = headers->l3_key.ttl;
			hdr_m->hop_limit = headers->l3_mask.ttl;
		}

		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
		list[i].type = ICE_L2TPV3;

		list[i].h_u.l2tpv3_sess_hdr.session_id =
			headers->l2tpv3_hdr.session_id;
		list[i].m_u.l2tpv3_sess_hdr.session_id =
			cpu_to_be32(0xFFFFFFFF);

		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}

/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * Detect the appropriate tunnel_type if the specified device is a tunnel
 * device such as VXLAN/Geneve.
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;

	/* Assume GTP-U by default in case of GTP netdev.
	 * GTP-C may be selected later, based on enc_dst_port.
	 */
	if (netif_is_gtp(tunnel_dev))
		return TNL_GTPU;
	return TNL_LAST;
}

bool ice_is_tunnel_supported(struct net_device *dev)
{
	return ice_tc_tun_get_type(dev) != TNL_LAST;
}

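/* Both real ice netdevs and supported tunnel devices count as "uplink"
 * endpoints when parsing switchdev actions.
 */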
static bool ice_tc_is_dev_uplink(struct net_device *dev)
{
	return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
}

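/**
 * ice_tc_setup_redirect_action - set up a FWD_TO_VSI redirect action
 * @filter_dev: device on which the filter was added
 * @fltr: Pointer to TC flower filter structure
 * @target_dev: netdevice the packet is redirected to
 *
 * Derive the destination VSI and filter direction from the combination of
 * @filter_dev and @target_dev (port representor vs. uplink).
 *
 * Return: 0 on success, -EINVAL for an unsupported device pair.
 */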
static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
					struct ice_tc_flower_fltr *fltr,
					struct net_device *target_dev)
{
	struct ice_repr *repr;

	fltr->action.fltr_act = ICE_FWD_TO_VSI;

	if (ice_is_port_repr_netdev(filter_dev) &&
	    ice_is_port_repr_netdev(target_dev)) {
		repr = ice_netdev_to_repr(target_dev);

		fltr->dest_vsi = repr->src_vsi;
		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
	} else if (ice_is_port_repr_netdev(filter_dev) &&
		   ice_tc_is_dev_uplink(target_dev)) {
		repr = ice_netdev_to_repr(filter_dev);

		fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
	} else if (ice_tc_is_dev_uplink(filter_dev) &&
		   ice_is_port_repr_netdev(target_dev)) {
		repr = ice_netdev_to_repr(target_dev);

		fltr->dest_vsi = repr->src_vsi;
		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
	} else {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unsupported netdevice in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

static int
ice_tc_setup_drop_action(struct net_device *filter_dev,
			 struct ice_tc_flower_fltr *fltr)
{
	fltr->action.fltr_act = ICE_DROP_PACKET;

	if (ice_is_port_repr_netdev(filter_dev)) {
		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
	} else if (ice_tc_is_dev_uplink(filter_dev)) {
		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
	} else {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unsupported netdevice in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

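/* Mirror counterpart of ice_tc_setup_redirect_action(): the same device-pair
 * rules apply, but the resulting action is ICE_MIRROR_PACKET.
 */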
static int ice_tc_setup_mirror_action(struct net_device *filter_dev,
				      struct ice_tc_flower_fltr *fltr,
				      struct net_device *target_dev)
{
	struct ice_repr *repr;

	fltr->action.fltr_act = ICE_MIRROR_PACKET;

	if (ice_is_port_repr_netdev(filter_dev) &&
	    ice_is_port_repr_netdev(target_dev)) {
		repr = ice_netdev_to_repr(target_dev);

		fltr->dest_vsi = repr->src_vsi;
		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
	} else if (ice_is_port_repr_netdev(filter_dev) &&
		   ice_tc_is_dev_uplink(target_dev)) {
		repr = ice_netdev_to_repr(filter_dev);

		fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
	} else if (ice_tc_is_dev_uplink(filter_dev) &&
		   ice_is_port_repr_netdev(target_dev)) {
		repr = ice_netdev_to_repr(target_dev);

		fltr->dest_vsi = repr->src_vsi;
		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
	} else {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unsupported netdevice in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
				       struct ice_tc_flower_fltr *fltr,
				       struct flow_action_entry *act)
{
	int err;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		err = ice_tc_setup_drop_action(filter_dev, fltr);
		if (err)
			return err;

		break;

	case FLOW_ACTION_REDIRECT:
		err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
		if (err)
			return err;

		break;

	case FLOW_ACTION_MIRRED:
		err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev);
		if (err)
			return err;
		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	int lkups_cnt;
	int ret;
	int i;

	if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, make the priority the highest; it also becomes the
	 * priority of the recipe which will get created as a result of the
	 * new extraction sequence based on the input set.
	 * Priority '7' is the max value for a switch recipe; the higher the
	 * number, the earlier the rule is evaluated.
	 */
	rule_info.priority = 7;
	rule_info.flags_info.act_valid = true;

	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		/* Uplink to VF */
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
	} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
		   fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
		/* VF to Uplink */
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
	} else {
		/* VF to VF */
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;
	rule_info.src_vsi = vsi->idx;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;
	fltr->dest_vsi_handle = rule_added.vsi_handle;

exit:
	kfree(list);
	return ret;
}

/**
 * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
 * @vsi: Pointer to VSI
 * @queue: Queue index
 *
 * Locate the VSI using the specified "queue". When ADQ is not enabled,
 * always return the input VSI, otherwise locate the corresponding
 * VSI based on per channel "offset" and "qcount".
 */
struct ice_vsi *
ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue)
{
	int num_tc, tc;

	/* if ADQ is not active, passed VSI is the candidate VSI */
	if (!ice_is_adq_active(vsi->back))
		return vsi;

	/* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending
	 * upon queue number)
	 */
	num_tc = vsi->mqprio_qopt.qopt.num_tc;

	for (tc = 0; tc < num_tc; tc++) {
		int qcount = vsi->mqprio_qopt.qopt.count[tc];
		int offset = vsi->mqprio_qopt.qopt.offset[tc];

		if (queue >= offset && queue < offset + qcount) {
			/* for non-ADQ TCs, passed VSI is the candidate VSI */
			if (tc < ICE_CHNL_START_TC)
				return vsi;
			else
				return vsi->tc_map_vsi[tc];
		}
	}
	return NULL;
}

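/* Return the Rx ring backing the filter's queue index, or NULL when the
 * index is out of range for @vsi.
 */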
static struct ice_rx_ring *
ice_locate_rx_ring_using_queue(struct ice_vsi *vsi,
			       struct ice_tc_flower_fltr *tc_fltr)
{
	u16 queue = tc_fltr->action.fwd.q.queue;

	return queue < vsi->num_rxq ? vsi->rx_rings[queue] : NULL;
}

/**
 * ice_tc_forward_action - Determine destination VSI and queue for the action
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Validates the tc forward action and determines the destination VSI and queue
 * for the forward action.
 */
static struct ice_vsi *
ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_rx_ring *ring = NULL;
	struct ice_vsi *dest_vsi = NULL;
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u32 tc_class;
	int q;

	dev = ice_pf_to_dev(pf);

	/* Get the destination VSI and/or destination queue and validate them */
	switch (tc_fltr->action.fltr_act) {
	case ICE_FWD_TO_VSI:
		tc_class = tc_fltr->action.fwd.tc.tc_class;
		/* Select the destination VSI */
		if (tc_class < ICE_CHNL_START_TC) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack,
					   "Unable to add filter because of unsupported destination");
			return ERR_PTR(-EOPNOTSUPP);
		}
		/* Locate ADQ VSI depending on hw_tc number */
		dest_vsi = vsi->tc_map_vsi[tc_class];
		break;
	case ICE_FWD_TO_Q:
		/* Locate the Rx queue */
		ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr);
		if (!ring) {
			dev_err(dev,
				"Unable to locate Rx queue for action fwd_to_queue: %u\n",
				tc_fltr->action.fwd.q.queue);
			return ERR_PTR(-EINVAL);
		}
		/* Determine destination VSI even though the action is
		 * FWD_TO_QUEUE, because QUEUE is associated with VSI
		 */
		q = tc_fltr->action.fwd.q.queue;
		dest_vsi = ice_locate_vsi_using_queue(vsi, q);
		break;
	default:
		dev_err(dev,
			"Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n",
			tc_fltr->action.fltr_act);
		return ERR_PTR(-EINVAL);
	}
	/* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */
	if (!dest_vsi) {
		dev_err(dev,
			"Unable to add filter because specified destination VSI doesn't exist\n");
		return ERR_PTR(-EINVAL);
	}
	return dest_vsi;
}

/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Add filter rules based on the filter parameters, using advanced recipes
 * supported by the OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	struct ice_vsi *dest_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* validate forwarding action VSI and queue */
	if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
		dest_vsi = ice_tc_forward_action(vsi, tc_fltr);
		if (IS_ERR(dest_vsi))
			return PTR_ERR(dest_vsi);
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	switch (tc_fltr->action.fltr_act) {
	case ICE_FWD_TO_VSI:
		rule_info.sw_act.vsi_handle = dest_vsi->idx;
		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
		rule_info.sw_act.src = hw->pf_id;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.fwd.tc.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
		break;
	case ICE_FWD_TO_Q:
		/* HW queue number in global space */
		rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
		rule_info.sw_act.vsi_handle = dest_vsi->idx;
		rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
		rule_info.sw_act.src = hw->pf_id;
		dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
			tc_fltr->action.fwd.q.queue,
			tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
		break;
	case ICE_DROP_PACKET:
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
		break;
	default:
		ret = -EOPNOTSUPP;
		goto exit;
	}

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
	if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
	    tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
		tc_fltr->dest_vsi = dest_vsi;
		/* keep track of advanced switch filter for
		 * destination VSI
		 */
		dest_vsi->num_chnl_fltr++;

		/* keeps track of channel filters for PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	switch (tc_fltr->action.fltr_act) {
	case ICE_FWD_TO_VSI:
		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n",
			lkups_cnt, flags,
			tc_fltr->action.fwd.tc.tc_class, rule_added.rid,
			rule_added.rule_id, rule_added.vsi_handle);
		break;
	case ICE_FWD_TO_Q:
		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u), rid %u, rule_id %u\n",
			lkups_cnt, flags, tc_fltr->action.fwd.q.queue,
			tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
			rule_added.rule_id);
		break;
	case ICE_DROP_PACKET:
		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
			lkups_cnt, flags, rule_added.rid, rule_added.rule_id);
		break;
	default:
		break;
	}
exit:
	kfree(list);
	return ret;
}

/**
 * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: Pointer to outer header fields
 *
 * Return: PPP protocol used in filter (ppp_ses or ppp_disc)
 */
static u16
ice_tc_set_pppoe(struct flow_match_pppoe *match,
		 struct ice_tc_flower_fltr *fltr,
		 struct ice_tc_flower_lyr_2_4_hdrs *headers)
{
	if (match->mask->session_id) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
		headers->pppoe_hdr.session_id = match->key->session_id;
	}

	if (match->mask->ppp_proto) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
		headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
	}

	return be16_to_cpu(match->key->type);
}

/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv4 address
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match->key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}

/**
 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv6 address
 */
static int
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	struct ice_tc_l3_hdr *l3_key, *l3_mask;

	/* src and dest IPV6 address should not be LOOPBACK
	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
	 */
	if (ipv6_addr_loopback(&match->key->dst) ||
	    ipv6_addr_loopback(&match->key->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
		return -EINVAL;
	}
	/* if both src and dest IPv6 masks are unspecified (any), error */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	}
	if (!ipv6_addr_any(&match->mask->src)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
	}

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel
 */
static void
ice_tc_set_tos_ttl(struct flow_match_ip *match,
		   struct ice_tc_flower_fltr *fltr,
		   struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   bool is_encap)
{
	if (match->mask->tos) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;

		headers->l3_key.tos = match->key->tos;
		headers->l3_mask.tos = match->mask->tos;
	}

	if (match->mask->ttl) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;

		headers->l3_key.ttl = match->key->ttl;
		headers->l3_mask.ttl = match->mask->ttl;
	}
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel port
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match.key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;

		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;

		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}

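/* Return @dev if it is itself a supported tunnel device; otherwise return
 * the first redirect target in @rule that is, or NULL when there is none.
 */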
static struct net_device *
ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	if (ice_is_tunnel_supported(dev))
		return dev;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_REDIRECT &&
		    ice_is_tunnel_supported(act->dev))
			return act->dev;
	}

	return NULL;
}

/**
 * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 *
 * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
 * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
 * therefore making GTP-U the default choice (when destination port number is
 * not specified).
 */
static int
ice_parse_gtp_type(struct flow_match_ports match,
		   struct ice_tc_flower_fltr *fltr)
{
	u16 dst_port;

	if (match.key->dst) {
		dst_port = be16_to_cpu(match.key->dst);

		switch (dst_port) {
		case 2152:
			/* well-known GTP-U port; keep default TNL_GTPU */
			break;
		case 2123:
			/* well-known GTP-C port */
			fltr->tunnel_type = TNL_GTPC;
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
			return -EINVAL;
		}
	}

	return 0;
}

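/**
 * ice_parse_tunnel_attr - parse tunnel (encap) matches from a flow rule
 * @dev: tunnel device the rule refers to
 * @rule: flow rule to parse
 * @fltr: Pointer to filter structure
 *
 * Fill the outer headers of @fltr from the enc_* matches of @rule.
 *
 * Return: 0 on success, negative error code on an unsupported match.
 */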
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
		      struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_match_control enc_control;

	fltr->tunnel_type = ice_tc_tun_get_type(dev);
	headers->l3_key.ip_proto = IPPROTO_UDP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid enc_keyid;

		flow_rule_match_enc_keyid(rule, &enc_keyid);

		if (!enc_keyid.mask->keyid ||
		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
			return -EINVAL;

		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
		fltr->tenant_id = enc_keyid.key->keyid;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, true))
			return -EINVAL;
	} else if (enc_control.key->addr_type ==
		   FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, true))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ice_tc_set_tos_ttl(&match, fltr, headers, true);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);

		if (fltr->tunnel_type != TNL_GTPU) {
			if (ice_tc_set_port(match, fltr, headers, true))
				return -EINVAL;
		} else {
			if (ice_parse_gtp_type(match, fltr))
				return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match;

		flow_rule_match_enc_opts(rule, &match);

		memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
		       sizeof(struct gtp_pdu_session_info));

		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
		       sizeof(struct gtp_pdu_session_info));

		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
	}

	return 0;
}

/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @vsi: Pointer to the VSI
 * @filter_dev: Pointer to device on which filter is being added
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;
	struct net_device *tunnel_dev;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PPPOE) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_L2TPV3))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
	if (tunnel_dev) {
		int err;

		filter_dev = tunnel_dev;

		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
		if (err) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
			return err;
		}

		/* header pointers should point to the inner headers; the
		 * outer headers were already set by ice_parse_tunnel_attr
		 */
		headers = &fltr->inner_headers;
	} else if (dissector->used_keys &
		   (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
		return -EOPNOTSUPP;
	} else {
		fltr->tunnel_type = TNL_LAST;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
		    fltr->tunnel_type == TNL_GTPU ||
		    fltr->tunnel_type == TNL_GTPC) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
				headers->vlan_hdr.vlan_id =
					cpu_to_be16(match.key->vlan_id &
						    VLAN_VID_MASK);
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		if (match.mask->vlan_priority) {
			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
			headers->vlan_hdr.vlan_prio =
				be16_encode_bits(match.key->vlan_priority,
						 VLAN_PRIO_MASK);
		}

		if (match.mask->vlan_tpid) {
			headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_TPID;
		}
	}
1587 | |
1588 | if (flow_rule_match_key(rule, key: FLOW_DISSECTOR_KEY_CVLAN)) { |
1589 | struct flow_match_vlan match; |
1590 | |
1591 | if (!ice_is_dvm_ena(hw: &vsi->back->hw)) { |
1592 | NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled" ); |
1593 | return -EINVAL; |
1594 | } |
1595 | |
1596 | flow_rule_match_cvlan(rule, out: &match); |
1597 | |
1598 | if (match.mask->vlan_id) { |
1599 | if (match.mask->vlan_id == VLAN_VID_MASK) { |
1600 | fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN; |
1601 | headers->cvlan_hdr.vlan_id = |
1602 | cpu_to_be16(match.key->vlan_id & |
1603 | VLAN_VID_MASK); |
1604 | } else { |
1605 | NL_SET_ERR_MSG_MOD(fltr->extack, |
1606 | "Bad CVLAN mask" ); |
1607 | return -EINVAL; |
1608 | } |
1609 | } |
1610 | |
1611 | if (match.mask->vlan_priority) { |
1612 | fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO; |
1613 | headers->cvlan_hdr.vlan_prio = |
1614 | be16_encode_bits(v: match.key->vlan_priority, |
1615 | VLAN_PRIO_MASK); |
1616 | } |
1617 | } |

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
		struct flow_match_pppoe match;

		flow_rule_match_pppoe(rule, &match);
		n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);

		/* If ethertype equals ETH_P_PPP_SES, n_proto might be
		 * overwritten by encapsulated protocol (ppp_proto field) or set
		 * to 0. To correct this, flow_match_pppoe provides the type
		 * field, which contains the actual ethertype (ETH_P_PPP_SES).
		 */
		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
		fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
	}
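
	/* Illustrative only: iproute2 exposes these keys as, e.g.,
	 *   tc filter add dev <ifname> ingress protocol ppp_ses flower \
	 *           pppoe_sid 1234 ppp_proto ip ...
	 */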

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ice_tc_set_tos_ttl(&match, fltr, headers, false);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
		struct flow_match_l2tpv3 match;

		flow_rule_match_l2tpv3(rule, &match);

		fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
		headers->l2tpv3_hdr.session_id = match.key->session_id;
	}
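
	/* Illustrative only: an L2TPv3 session match such as
	 *   tc filter add dev <ifname> ingress flower ip_proto l2tp \
	 *           l2tpv3_sid 1234 ...
	 * lands in the FLOW_DISSECTOR_KEY_L2TPV3 branch above.
	 */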

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, false))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}

/**
 * ice_prep_adq_filter - Prepare ADQ filter with the required additional headers
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter structure
 *
 * Prepare ADQ filter with the required additional header fields
 */
static int
ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			    ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because combining a tunnel key with an inner MAC match is unsupported");
		return -EOPNOTSUPP;
	}

	/* An ADQ filter must include a dest MAC address, otherwise unwanted
	 * packets with unrelated MAC addresses get delivered to ADQ VSIs as
	 * long as the remaining filter criteria (such as dest IP address and
	 * dest/src L4 port) are satisfied. The code below handles these cases:
	 * 1. For non-tunnel filters, if the user specified MAC addresses, use
	 *    them.
	 * 2. For non-tunnel filters, if the user didn't specify a MAC address,
	 *    implicitly add the lower netdev's active unicast MAC address as
	 *    the dest MAC.
	 * 3. For tunnel filters, the TC flower classifier currently has no
	 *    provision for the user to specify an outer DMAC, so the driver
	 *    implicitly adds the lower netdev's active unicast MAC address as
	 *    the outer dest MAC.
	 */
	if (fltr->tunnel_type != TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;

	if (fltr->tunnel_type == TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				vsi->netdev->dev_addr);
		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
	}

	/* Make sure the VLAN is already added to the main VSI before allowing
	 * ADQ to add a VLAN-based filter such as MAC + VLAN + L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because the legacy VLAN filter for the specified destination doesn't exist");
			return -EINVAL;
		}
	}
	return 0;
}

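/* Illustrative only (assumes ADQ channels were created beforehand, e.g. via
 * an mqprio qdisc in "channel" mode): a filter steering a flow to ADQ
 * traffic class 1 could look like
 *   tc filter add dev <ifname> protocol ip ingress flower \
 *           dst_ip 192.168.1.1 ip_proto tcp dst_port 80 \
 *           skip_sw hw_tc 1
 * The hw_tc action is resolved by ice_handle_tclass_action() below.
 */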
/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class/queue-set
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);

	/* user specified hw_tc (must be non-zero for ADQ TC), action is
	 * forward to hw_tc (i.e. ADQ channel number)
	 */
	if (tc < ICE_CHNL_START_TC) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of unsupported destination");
		return -EOPNOTSUPP;
	}
	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because the destination doesn't exist");
		return -EINVAL;
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.fwd.tc.tc_class = tc;

	return ice_prep_adq_filter(vsi, fltr);
}

static int
ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
			struct flow_action_entry *act)
{
	struct ice_vsi *ch_vsi = NULL;
	u16 queue = act->rx_queue;

	if (queue >= vsi->num_rxq) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because specified queue is invalid");
		return -EINVAL;
	}
	fltr->action.fltr_act = ICE_FWD_TO_Q;
	fltr->action.fwd.q.queue = queue;
	/* determine corresponding HW queue */
	fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue];

	/* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare
	 * ADQ switch filter
	 */
	ch_vsi = ice_locate_vsi_using_queue(vsi, fltr->action.fwd.q.queue);
	if (!ch_vsi)
		return -EINVAL;
	fltr->dest_vsi = ch_vsi;
	if (!ice_is_chnl_fltr(fltr))
		return 0;

	return ice_prep_adq_filter(vsi, fltr);
}

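/* Illustrative only: redirecting a flow to a specific Rx queue is typically
 * expressed with the skbedit action, e.g.
 *   tc filter add dev <ifname> ingress flower ip_proto udp dst_port 4000 \
 *           skip_sw action skbedit queue_mapping 5
 * which reaches the driver as FLOW_ACTION_RX_QUEUE_MAPPING, handled below.
 */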
static int
ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
		    struct flow_action_entry *act)
{
	switch (act->id) {
	case FLOW_ACTION_RX_QUEUE_MAPPING:
		/* forward to queue */
		return ice_tc_forward_to_queue(vsi, fltr, act);
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		return 0;
	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
		return -EOPNOTSUPP;
	}
}

/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
				       struct ice_vsi *vsi,
				       struct flow_cls_offload *cls_flower,
				       struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i, err;

	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back))
			err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
		else
			err = ice_tc_parse_action(vsi, fltr, act);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from HW table and manages book-keeping
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	int err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_vsi_handle;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == -ENOENT) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}

/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to pointer of struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}

/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

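	/* Illustrative only: hardware TC offload must be enabled on the
	 * device before filters are accepted, e.g. via
	 *   ethtool -K <ifname> hw-tc-offload on
	 */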
	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from the kernel, all ice
		 * devices get an instance of the rule from the higher-level
		 * device. Avoid triggering an explicit error in this case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries; if one exists, return an error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

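	/* Clear any stale extack pointer before re-adding each filter: replay
	 * runs from the reset/rebuild path, where no netlink request (and
	 * hence no extack) is in flight.
	 */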
	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}