// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

#include "ice.h"
#include <net/rps.h>

/**
 * ice_is_arfs_active - helper to check if aRFS is active
 * @vsi: VSI to check
 */
static bool ice_is_arfs_active(struct ice_vsi *vsi)
{
	return !!vsi->arfs_fltr_list;
}

/**
 * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
 * @hw: pointer to the HW structure
 * @flow_type: flow type as Flow Director understands it
 *
 * Flow Director will query this function to see if aRFS is currently using
 * the specified flow_type for perfect (4-tuple) filters.
 */
bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
{
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	struct ice_pf *pf = hw->back;
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;

	/* active counters can be updated by multiple CPUs */
	smp_mb__before_atomic();
	switch (flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
	default:
		return false;
	}
}

/**
 * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
 * @vsi: VSI that aRFS is active on
 * @entry: aRFS entry used to change counters
 * @add: true to increment counter, false to decrement
 */
static void
ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
				  struct ice_arfs_entry *entry, bool add)
{
	struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;

	switch (entry->fltr_info.flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv6_cnt);
		break;
	default:
		dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
			entry->fltr_info.flow_type);
	}
}

/**
 * ice_arfs_del_flow_rules - delete the rules passed in from HW
 * @vsi: VSI for the flow rules that need to be deleted
 * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
 *
 * Loop through the delete list passed in and remove the rules from HW. After
 * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
 * longer being referenced by the aRFS hash table.
 */
static void
ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, e, false);
		else
			dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, e->fltr_state, e->fltr_info.fltr_id,
				e->flow_id, e->fltr_info.q_index);

		/* The aRFS hash table is no longer referencing this entry */
		hlist_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_arfs_add_flow_rules - add the rules passed in to HW
 * @vsi: VSI for the flow rules that need to be added
 * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
 *
 * Loop through the add list passed in and add the rules to HW. After each
 * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
 * the ice_arfs_entry(s) because they are still being referenced in the aRFS
 * hash table.
 */
static void
ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
{
	struct ice_arfs_entry_ptr *ep;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back,
					     &ep->arfs_entry->fltr_info, true,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
							  true);
		else
			dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, ep->arfs_entry->fltr_state,
				ep->arfs_entry->fltr_info.fltr_id,
				ep->arfs_entry->flow_id,
				ep->arfs_entry->fltr_info.q_index);

		hlist_del(&ep->list_entry);
		devm_kfree(dev, ep);
	}
}

/**
 * ice_arfs_is_flow_expired - check if the aRFS entry has expired
 * @vsi: VSI containing the aRFS entry
 * @arfs_entry: aRFS entry that's being checked for expiration
 *
 * Return true if the flow has expired, else false. This function should be used
 * to determine whether or not an aRFS entry should be removed from the hardware
 * and software structures.
 */
static bool
ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
{
#define ICE_ARFS_TIME_DELTA_EXPIRATION	msecs_to_jiffies(5000)
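	/* ask the stack first: rps_may_expire_flow() returns true once the
	 * RFS table no longer steers this flow_id to the filter we programmed,
	 * so the HW rule can be reclaimed regardless of protocol
	 */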
	if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
				arfs_entry->flow_id,
				arfs_entry->fltr_info.fltr_id))
		return true;

	/* expiration timer only used for UDP filters */
	if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
	    arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		return false;

	return time_in_range64(arfs_entry->time_activated +
			       ICE_ARFS_TIME_DELTA_EXPIRATION,
			       arfs_entry->time_activated, get_jiffies_64());
}

/**
 * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
 * @vsi: the VSI to be forwarded to
 * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
 * @add_list: list to populate with filters to be added to Flow Director
 * @del_list: list to populate with filters to be deleted from Flow Director
 *
 * Iterate over the hlist at the index given in the aRFS hash table and
 * determine if there are any aRFS entries that need to be either added or
 * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
 * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
 * the flow has expired delete the filter from HW. The caller of this function
 * is expected to add/delete rules on the add_list/del_list respectively.
 */
static void
ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
			   struct hlist_head *add_list,
			   struct hlist_head *del_list)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	/* go through the aRFS hlist at this idx and check for needed updates */
	hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
		/* check if filter needs to be added to HW */
		if (e->fltr_state == ICE_ARFS_INACTIVE) {
			enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
			struct ice_arfs_entry_ptr *ep =
				devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

			if (!ep)
				continue;
			INIT_HLIST_NODE(&ep->list_entry);
			/* reference aRFS entry to add HW filter */
			ep->arfs_entry = e;
			hlist_add_head(&ep->list_entry, add_list);
			e->fltr_state = ICE_ARFS_ACTIVE;
			/* expiration timer only used for UDP flows */
			if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
				e->time_activated = get_jiffies_64();
		} else if (e->fltr_state == ICE_ARFS_ACTIVE) {
			/* check if filter needs to be removed from HW */
			if (ice_arfs_is_flow_expired(vsi, e)) {
				/* remove aRFS entry from hash table for delete
				 * and to prevent referencing it the next time
				 * through this hlist index
				 */
				hlist_del(&e->list_entry);
				e->fltr_state = ICE_ARFS_TODEL;
				/* save reference to aRFS entry for delete */
				hlist_add_head(&e->list_entry, del_list);
			}
		}
}

/**
 * ice_sync_arfs_fltrs - update all aRFS filters
 * @pf: board private structure
 */
void ice_sync_arfs_fltrs(struct ice_pf *pf)
{
	HLIST_HEAD(tmp_del_list);
	HLIST_HEAD(tmp_add_list);
	struct ice_vsi *pf_vsi;
	unsigned int i;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	if (!ice_is_arfs_active(pf_vsi))
		return;

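	/* collect add/delete candidates on the temporary lists while holding
	 * the aRFS spinlock, then program/remove the HW filters after the
	 * lock is dropped
	 */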
	spin_lock_bh(&pf_vsi->arfs_lock);
	/* Once we process aRFS for the PF VSI get out */
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
					   &tmp_del_list);
	spin_unlock_bh(&pf_vsi->arfs_lock);

	/* use list of ice_arfs_entry(s) for delete */
	ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);

	/* use list of ice_arfs_entry_ptr(s) for add */
	ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
}

/**
 * ice_arfs_build_entry - builds an aRFS entry based on input
 * @vsi: destination VSI for this flow
 * @fk: flow dissector keys for creating the tuple
 * @rxq_idx: Rx queue to steer this flow to
 * @flow_id: passed down from the stack and saved for flow expiration
 *
 * returns an aRFS entry on success and NULL on failure
 */
static struct ice_arfs_entry *
ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
		     u16 rxq_idx, u32 flow_id)
{
	struct ice_arfs_entry *arfs_entry;
	struct ice_fdir_fltr *fltr_info;
	u8 ip_proto;

	arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
				  sizeof(*arfs_entry),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!arfs_entry)
		return NULL;

	fltr_info = &arfs_entry->fltr_info;
	fltr_info->q_index = rxq_idx;
	fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
	fltr_info->dest_vsi = vsi->idx;
	ip_proto = fk->basic.ip_proto;

	if (fk->basic.n_proto == htons(ETH_P_IP)) {
		fltr_info->ip.v4.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV4_TCP :
			ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
		fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
		fltr_info->ip.v4.src_port = fk->ports.src;
		fltr_info->ip.v4.dst_port = fk->ports.dst;
	} else { /* ETH_P_IPV6 */
		fltr_info->ip.v6.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV6_TCP :
			ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
		fltr_info->ip.v6.src_port = fk->ports.src;
		fltr_info->ip.v6.dst_port = fk->ports.dst;
	}

	arfs_entry->flow_id = flow_id;
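	/* fltr_id is reported back to the stack as the steering filter ID;
	 * wrapping below RPS_NO_FILTER guarantees the reserved "no filter"
	 * value is never handed out
	 */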
	fltr_info->fltr_id =
		atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;

	return arfs_entry;
}

/**
 * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set
 * @hw: pointer to HW structure
 * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
 * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
 *
 * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS
 * to check if perfect (4-tuple) flow rules are currently in place by Flow
 * Director.
 */
static bool
ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
{
	unsigned long *perfect_fltr = hw->fdir_perfect_fltr;

	/* advanced Flow Director disabled, perfect filters always supported */
	if (!perfect_fltr)
		return true;

	if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);

	return false;
}

/**
 * ice_rx_flow_steer - steer the Rx flow to where application is being run
 * @netdev: ptr to the netdev being adjusted
 * @skb: buffer with required header information
 * @rxq_idx: queue to which the flow needs to move
 * @flow_id: flow identifier provided by the netdev
 *
 * Based on the skb, rxq_idx, and flow_id passed in add/update an entry in the
 * aRFS hash table. Iterate over one of the hlists in the aRFS hash table and
 * if the flow_id already exists in the hash table but the rxq_idx has changed
 * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW, else
 * if the entry is marked as ICE_ARFS_TODEL delete it from the aRFS hash table.
 * If neither of the previous conditions are true then add a new entry in the
 * aRFS hash table, which gets set to ICE_ARFS_INACTIVE by default so it can be
 * added to HW.
 */
int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
		  u16 rxq_idx, u32 flow_id)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_arfs_entry *arfs_entry;
	struct ice_vsi *vsi = np->vsi;
	struct flow_keys fk;
	struct ice_pf *pf;
	__be16 n_proto;
	u8 ip_proto;
	u16 idx;
	int ret;

	/* failed to allocate memory for aRFS so don't crash */
	if (unlikely(!vsi->arfs_fltr_list))
		return -ENODEV;

	pf = vsi->back;

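	/* encapsulated frames are not supported; aRFS only programs filters
	 * on the outer, non-tunneled headers
	 */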
	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	n_proto = fk.basic.n_proto;
	/* Support only IPV4 and IPV6 */
	if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
	    n_proto == htons(ETH_P_IPV6))
		ip_proto = fk.basic.ip_proto;
	else
		return -EPROTONOSUPPORT;

	/* Support only TCP and UDP */
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	/* only support 4-tuple filters for aRFS */
	if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
		return -EOPNOTSUPP;

	/* choose the aRFS list bucket based on skb hash */
	idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
	/* search for entry in the bucket */
	spin_lock_bh(&vsi->arfs_lock);
	hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
			     list_entry) {
		struct ice_fdir_fltr *fltr_info;

		/* keep searching for the already existing arfs_entry flow */
		if (arfs_entry->flow_id != flow_id)
			continue;

		fltr_info = &arfs_entry->fltr_info;
		ret = fltr_info->fltr_id;

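		/* nothing to do if the flow already targets this queue, or if
		 * the filter is not in the ACTIVE state (i.e. it is still
		 * waiting to be programmed or is queued for deletion)
		 */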
		if (fltr_info->q_index == rxq_idx ||
		    arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
			goto out;

		/* update the queue to forward to on an already existing flow */
		fltr_info->q_index = rxq_idx;
		arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
		ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
		goto out_schedule_service_task;
	}

	arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
	if (!arfs_entry) {
		ret = -ENOMEM;
		goto out;
	}

	ret = arfs_entry->fltr_info.fltr_id;
	INIT_HLIST_NODE(&arfs_entry->list_entry);
	hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule_service_task:
	ice_service_task_schedule(pf);
out:
	spin_unlock_bh(&vsi->arfs_lock);
	return ret;
}

/**
 * ice_init_arfs_cntrs - initialize aRFS counter values
 * @vsi: VSI that aRFS counters need to be initialized on
 */
static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
{
	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
				       GFP_KERNEL);
	if (!vsi->arfs_fltr_cntrs)
		return -ENOMEM;

	vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
					 GFP_KERNEL);
	if (!vsi->arfs_last_fltr_id) {
		kfree(vsi->arfs_fltr_cntrs);
		vsi->arfs_fltr_cntrs = NULL;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_init_arfs - initialize aRFS resources
 * @vsi: the VSI to be forwarded to
 */
void ice_init_arfs(struct ice_vsi *vsi)
{
	struct hlist_head *arfs_fltr_list;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
				 GFP_KERNEL);
	if (!arfs_fltr_list)
		return;

	if (ice_init_arfs_cntrs(vsi))
		goto free_arfs_fltr_list;

	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		INIT_HLIST_HEAD(&arfs_fltr_list[i]);

	spin_lock_init(&vsi->arfs_lock);

	vsi->arfs_fltr_list = arfs_fltr_list;

	return;

free_arfs_fltr_list:
	kfree(arfs_fltr_list);
}

/**
 * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
 * @vsi: the VSI to be forwarded to
 */
void ice_clear_arfs(struct ice_vsi *vsi)
{
	struct device *dev;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
	    !vsi->arfs_fltr_list)
		return;

	dev = ice_pf_to_dev(vsi->back);
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
		struct ice_arfs_entry *r;
		struct hlist_node *n;

		spin_lock_bh(&vsi->arfs_lock);
		hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
					  list_entry) {
			hlist_del(&r->list_entry);
			devm_kfree(dev, r);
		}
		spin_unlock_bh(&vsi->arfs_lock);
	}

	kfree(vsi->arfs_fltr_list);
	vsi->arfs_fltr_list = NULL;
	kfree(vsi->arfs_last_fltr_id);
	vsi->arfs_last_fltr_id = NULL;
	kfree(vsi->arfs_fltr_cntrs);
	vsi->arfs_fltr_cntrs = NULL;
}

/**
 * ice_free_cpu_rx_rmap - free the previously set up CPU reverse map
 * @vsi: the VSI to be forwarded to
 */
void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	netdev = vsi->netdev;
	if (!netdev || !netdev->rx_cpu_rmap)
		return;

	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
}

/**
 * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
 * @vsi: the VSI to be forwarded to
 */
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;
	struct ice_pf *pf;
	int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return 0;

	pf = vsi->back;
	netdev = vsi->netdev;
	if (!pf || !netdev || !vsi->num_q_vectors)
		return -EINVAL;

	netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
		   vsi->type, netdev->name, vsi->num_q_vectors);

	netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
	if (unlikely(!netdev->rx_cpu_rmap))
		return -EINVAL;

	ice_for_each_q_vector(vsi, i)
		if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				     vsi->q_vectors[i]->irq.virq)) {
			ice_free_cpu_rx_rmap(vsi);
			return -EINVAL;
		}

	return 0;
}

/**
 * ice_remove_arfs - remove/clear all aRFS resources
 * @pf: device private structure
 */
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_clear_arfs(pf_vsi);
}

/**
 * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
 * @pf: device private structure
 */
void ice_rebuild_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_remove_arfs(pf);
	ice_init_arfs(pf_vsi);
}