1 | /* |
2 | * Copyright (c) 2016, Mellanox Technologies. All rights reserved. |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | |
33 | #ifndef __MLX5_EN_TC_H__ |
34 | #define __MLX5_EN_TC_H__ |
35 | |
36 | #include <net/pkt_cls.h> |
37 | #include "en.h" |
38 | #include "eswitch.h" |
39 | #include "en/tc_ct.h" |
40 | #include "en/tc_tun.h" |
41 | #include "en/tc/int_port.h" |
42 | #include "en/tc/meter.h" |
43 | #include "en_rep.h" |
44 | |
45 | #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff |
46 | |
47 | #ifdef CONFIG_MLX5_ESWITCH |
48 | |
/* Allocation sizes for struct mlx5_flow_attr: the base struct plus the
 * namespace-specific attr that occupies its trailing flex-array union
 * (NIC path vs eswitch/FDB path).  ns_to_attr_sz() picks the right size
 * for a given flow namespace.
 */
#define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
			  sizeof(struct mlx5_nic_flow_attr))
#define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
			  sizeof(struct mlx5_esw_flow_attr))
#define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
			    ESW_FLOW_ATTR_SZ :\
			    NIC_FLOW_ATTR_SZ)
56 | |
57 | struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc); |
58 | int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags); |
59 | |
/* Out-parameter state filled by mlx5e_tc_update_skb*() for the RX path.
 * NOTE(review): field semantics inferred from names — fwd_dev presumably
 * names a device the skb should be forwarded to; confirm against the
 * implementation in en_tc.c before relying on this.
 */
struct mlx5e_tc_update_priv {
	struct net_device *fwd_dev;
	bool skb_done;
	bool forward_tx;
};
65 | |
/* NIC-offload-path specific attributes; lives in the trailing union of
 * struct mlx5_flow_attr (see NIC_FLOW_ATTR_SZ).
 */
struct mlx5_nic_flow_attr {
	u32 flow_tag;
	u32 hairpin_tirn;
	struct mlx5_flow_table *hairpin_ft;
};
71 | |
/* Attributes of an offloaded flow, shared by the NIC and eswitch (FDB)
 * offload paths.  The trailing flex-array union carries the path-specific
 * part; allocations are sized with NIC_FLOW_ATTR_SZ / ESW_FLOW_ATTR_SZ
 * via ns_to_attr_sz(), so the union MUST remain the last member.
 */
struct mlx5_flow_attr {
	u32 action;
	unsigned long tc_act_cookies[TCA_ACT_MAX_PRIO];
	struct mlx5_fc *counter;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
	struct mlx5_ct_attr ct_attr;
	struct mlx5e_sample_attr sample_attr;
	struct mlx5e_meter_attr meter_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	u32 chain;
	u16 prio;
	u16 tc_act_cookies_count;	/* number of valid entries in tc_act_cookies */
	u32 dest_chain;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_table *dest_ft;
	u8 inner_match_level;
	u8 outer_match_level;
	u8 tun_ip_version;
	int tunnel_id; /* mapped tunnel id */
	u32 flags;	/* MLX5_ATTR_FLAG_* bits */
	u32 exe_aso_type;
	struct list_head list;
	struct mlx5e_post_act_handle *post_act_handle;
	struct mlx5_flow_attr *branch_true;
	struct mlx5_flow_attr *branch_false;
	struct mlx5_flow_attr *jumping_attr;
	struct mlx5_flow_handle *act_id_restore_rule;
	/* keep this union last */
	union {
		DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
		DECLARE_FLEX_ARRAY(struct mlx5_nic_flow_attr, nic_attr);
	};
};
107 | |
/* Flag bits for mlx5_flow_attr::flags. */
enum {
	MLX5_ATTR_FLAG_VLAN_HANDLED = BIT(0),
	/* SLOW_PATH and ACCEPT cause mlx5e_tc_attr_flags_skip() to return
	 * true, i.e. further TC/NF processing is skipped for the flow.
	 */
	MLX5_ATTR_FLAG_SLOW_PATH = BIT(1),
	MLX5_ATTR_FLAG_NO_IN_PORT = BIT(2),
	MLX5_ATTR_FLAG_SRC_REWRITE = BIT(3),
	MLX5_ATTR_FLAG_SAMPLE = BIT(4),
	MLX5_ATTR_FLAG_ACCEPT = BIT(5),
	MLX5_ATTR_FLAG_CT = BIT(6),
	MLX5_ATTR_FLAG_TERMINATING = BIT(7),
	MLX5_ATTR_FLAG_MTU = BIT(8),
};
119 | |
120 | /* Returns true if any of the flags that require skipping further TC/NF processing are set. */ |
121 | static inline bool |
122 | mlx5e_tc_attr_flags_skip(u32 attr_flags) |
123 | { |
124 | return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT); |
125 | } |
126 | |
/* Tunnel attributes associated with an RX flow that decapsulates traffic.
 * The address unions hold either an IPv4 or an IPv6 address; which one is
 * in use is not recorded here (NOTE(review): presumably determined by the
 * tunnel's IP version elsewhere — confirm with users of this struct).
 */
struct mlx5_rx_tun_attr {
	u16 decap_vport;
	union {
		__be32 v4;
		struct in6_addr v6;
	} src_ip; /* Valid if decap_vport is not zero */
	union {
		__be32 v4;
		struct in6_addr v6;
	} dst_ip; /* Valid if decap_vport is not zero */
};
138 | |
139 | #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16 |
140 | #define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0) |
141 | |
142 | #define MLX5E_TC_MAX_INT_PORT_NUM (8) |
143 | |
144 | #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) |
145 | |
/* Dissected tunnel-header match fields plus the ifindex of the filter
 * device.  The anonymous union holds the encap addresses for exactly one
 * address family.  NOTE(review): presumably used as the key for the
 * tunnel_id mapping described above TUNNEL_INFO_BITS — confirm in en_tc.c.
 */
struct tunnel_match_key {
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_keyid enc_key_id;
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_ip enc_ip;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};

	int filter_ifindex;
};
158 | |
/* Key/mask pair for matching on tunnel encapsulation options. */
struct tunnel_match_enc_opts {
	struct flow_dissector_key_enc_opts key;
	struct flow_dissector_key_enc_opts mask;
};
163 | |
164 | /* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS. |
165 | * Upper TUNNEL_INFO_BITS for general tunnel info. |
166 | * Lower ENC_OPTS_BITS bits for enc_opts. |
167 | */ |
168 | #define TUNNEL_INFO_BITS 12 |
169 | #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0) |
170 | #define ENC_OPTS_BITS 11 |
171 | #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0) |
172 | #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS) |
173 | #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0) |
174 | |
/* Bit numbers qualifying a TC offload operation (direction and offload
 * target); turned into flag masks with the MLX5_TC_FLAG() macro below.
 */
enum {
	MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
};
183 | |
184 | #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT) |
185 | |
186 | int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv); |
187 | void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv); |
188 | |
189 | int mlx5e_tc_ht_init(struct rhashtable *tc_ht); |
190 | void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht); |
191 | |
192 | int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, |
193 | struct flow_cls_offload *f, unsigned long flags); |
194 | int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, |
195 | struct flow_cls_offload *f, unsigned long flags); |
196 | |
197 | int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, |
198 | struct flow_cls_offload *f, unsigned long flags); |
199 | int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv, |
200 | struct flow_offload_action *fl_act); |
201 | |
202 | int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, |
203 | struct tc_cls_matchall_offload *f); |
204 | int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, |
205 | struct tc_cls_matchall_offload *f); |
206 | |
207 | struct mlx5e_encap_entry; |
208 | void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, |
209 | struct mlx5e_encap_entry *e, |
210 | struct list_head *flow_list); |
211 | void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, |
212 | struct mlx5e_encap_entry *e, |
213 | struct list_head *flow_list); |
214 | bool mlx5e_encap_take(struct mlx5e_encap_entry *e); |
215 | void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e); |
216 | |
217 | void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list); |
218 | void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list); |
219 | |
220 | struct mlx5e_neigh_hash_entry; |
221 | struct mlx5e_encap_entry * |
222 | mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe, |
223 | struct mlx5e_encap_entry *e); |
224 | void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); |
225 | |
226 | void mlx5e_tc_reoffload_flows_work(struct work_struct *work); |
227 | |
/* Logical identifiers for the metadata registers used to carry TC state
 * (chain/tunnel/CT info, etc.) between flow tables and back to software.
 * Each value indexes mlx5e_tc_attr_to_reg_mappings[], which describes the
 * concrete rewrite field, offset, and width (see the MLX5_REG_MAPPING_*
 * accessors below).
 */
enum mlx5e_tc_attr_to_reg {
	MAPPED_OBJ_TO_REG,
	VPORT_TO_REG,
	TUNNEL_TO_REG,
	CTSTATE_TO_REG,
	ZONE_TO_REG,
	ZONE_RESTORE_TO_REG,
	MARK_TO_REG,
	LABELS_TO_REG,
	FTEID_TO_REG,
	NIC_MAPPED_OBJ_TO_REG,
	NIC_ZONE_RESTORE_TO_REG,
	PACKET_COLOR_TO_REG,
};
242 | |
/* Describes how one enum mlx5e_tc_attr_to_reg register maps onto a
 * modify-header rewrite field and its match location in the flow spec.
 */
struct mlx5e_tc_attr_to_reg_mapping {
	int mfield; /* rewrite field */
	int moffset; /* bit offset of mfield */
	int mlen; /* bits to rewrite/match */

	int soffset; /* byte offset of spec for match */
};
250 | |
251 | extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; |
252 | |
253 | #define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset) |
254 | #define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen) |
255 | #define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0)) |
256 | |
257 | bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, |
258 | struct net_device *out_dev); |
259 | |
260 | int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, |
261 | struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, |
262 | enum mlx5_flow_namespace_type ns, |
263 | enum mlx5e_tc_attr_to_reg type, |
264 | u32 data); |
265 | |
266 | void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev, |
267 | struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, |
268 | enum mlx5e_tc_attr_to_reg type, |
269 | int act_id, u32 data); |
270 | |
271 | void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, |
272 | enum mlx5e_tc_attr_to_reg type, |
273 | u32 data, |
274 | u32 mask); |
275 | |
276 | void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec, |
277 | enum mlx5e_tc_attr_to_reg type, |
278 | u32 *data, |
279 | u32 *mask); |
280 | |
281 | int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, |
282 | struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, |
283 | enum mlx5_flow_namespace_type ns, |
284 | enum mlx5e_tc_attr_to_reg type, |
285 | u32 data); |
286 | |
287 | int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv, |
288 | struct mlx5e_tc_flow *flow, |
289 | struct mlx5_flow_attr *attr); |
290 | |
291 | void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv, |
292 | struct mlx5e_tc_flow *flow, |
293 | struct mlx5_flow_attr *attr); |
294 | |
/* Set the ethertype match for @match on a flow spec.  headers_c/headers_v
 * point at the match-criteria and match-value header groups respectively
 * (NOTE(review): names restored per the usual mlx5 _c/_v convention —
 * parameter names were missing from this prototype; confirm against the
 * definition).
 */
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v);
298 | |
299 | int mlx5e_tc_nic_init(struct mlx5e_priv *priv); |
300 | void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); |
301 | |
302 | int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
303 | void *cb_priv); |
304 | |
305 | struct mlx5_flow_handle * |
306 | mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, |
307 | struct mlx5_flow_spec *spec, |
308 | struct mlx5_flow_attr *attr); |
309 | void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv, |
310 | struct mlx5_flow_handle *rule, |
311 | struct mlx5_flow_attr *attr); |
312 | |
313 | struct mlx5_flow_handle * |
314 | mlx5_tc_rule_insert(struct mlx5e_priv *priv, |
315 | struct mlx5_flow_spec *spec, |
316 | struct mlx5_flow_attr *attr); |
317 | void |
318 | mlx5_tc_rule_delete(struct mlx5e_priv *priv, |
319 | struct mlx5_flow_handle *rule, |
320 | struct mlx5_flow_attr *attr); |
321 | |
322 | bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev); |
323 | int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, |
324 | u16 *vport); |
325 | |
326 | int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, |
327 | struct mlx5_flow_attr *attr, |
328 | int ifindex, |
329 | enum mlx5e_tc_int_port_type type, |
330 | u32 *action, |
331 | int out_index); |
332 | #else /* CONFIG_MLX5_CLS_ACT */ |
/* Stubs for when TC classifier-action offload (CONFIG_MLX5_CLS_ACT) is
 * compiled out: init/cleanup become no-ops and the block callback reports
 * the setup type as unsupported.
 */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
340 | |
341 | #endif /* CONFIG_MLX5_CLS_ACT */ |
342 | |
343 | struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type); |
344 | |
345 | struct mlx5_flow_handle * |
346 | mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, |
347 | struct mlx5_flow_spec *spec, |
348 | struct mlx5_flow_attr *attr); |
349 | void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv, |
350 | struct mlx5_flow_handle *rule, |
351 | struct mlx5_flow_attr *attr); |
352 | |
353 | #else /* CONFIG_MLX5_ESWITCH */ |
/* Stubs for when eswitch support (CONFIG_MLX5_ESWITCH) is compiled out. */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
/* No TC filters can be installed without eswitch support. */
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
				       unsigned long flags)
{
	return 0;
}

static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
365 | #endif |
366 | |
367 | #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) |
368 | struct mlx5e_tc_table *mlx5e_tc_table_alloc(void); |
369 | void mlx5e_tc_table_free(struct mlx5e_tc_table *tc); |
370 | static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe) |
371 | { |
372 | u32 chain, reg_b; |
373 | |
374 | reg_b = be32_to_cpu(cqe->ft_metadata); |
375 | |
376 | if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS)) |
377 | return false; |
378 | |
379 | chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK; |
380 | if (chain) |
381 | return true; |
382 | |
383 | return false; |
384 | } |
385 | |
386 | bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb); |
387 | bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb, |
388 | struct mapping_ctx *mapping_ctx, u32 mapped_obj_id, |
389 | struct mlx5_tc_ct_priv *ct_priv, |
390 | u32 zone_restore_id, u32 tunnel_id, |
391 | struct mlx5e_tc_update_priv *tc_priv); |
392 | #else /* CONFIG_MLX5_CLS_ACT */ |
/* Stubs for when TC classifier-action offload (CONFIG_MLX5_CLS_ACT) is
 * compiled out.
 */
static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
/* Without CLS_ACT the CQE metadata never carries a chain tag. */
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
/* NOTE(review): returning true presumably means "skb is fine, continue
 * normal RX" — confirm against the callers of this helper.
 */
static inline bool
mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{ return true; }
400 | #endif |
401 | |
402 | int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, |
403 | u64 act_miss_cookie, u32 *act_miss_mapping); |
404 | void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, |
405 | u32 act_miss_mapping); |
406 | |
407 | #endif /* __MLX5_EN_TC_H__ */ |
408 | |