// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Copyright (C) 2023 Intel Corporation
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
#include <linux/rhashtable.h>

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static const struct rhashtable_params fast_tx_rht_params = {
	.nelem_hint = 10,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct ieee80211_mesh_fast_tx, addr_key),
	.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
	.hashfn = mesh_table_hash,
};

static void __mesh_fast_tx_entry_free(void *ptr, void *tblptr)
{
	struct ieee80211_mesh_fast_tx *entry = ptr;

	kfree_rcu(entry, fast_tx.rcu_head);
}

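/* Tear down the fast xmit cache; each entry is freed after an RCU grace
 * period via __mesh_fast_tx_entry_free().
 */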
static void mesh_fast_tx_deinit(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_free_and_destroy(&cache->rht,
				    __mesh_fast_tx_entry_free, NULL);
}

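/* Set up the per-interface fast xmit cache: an rhashtable keyed by
 * destination address, plus a list head and lock for walking all entries.
 */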
static void mesh_fast_tx_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_init(&cache->rht, &fast_tx_rht_params);
	INIT_HLIST_HEAD(&cache->walk_head);
	spin_lock_init(&cache->walk_lock);
}

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static void mesh_table_init(struct mesh_table *tbl)
{
	INIT_HLIST_HEAD(&tbl->known_gates);
	INIT_HLIST_HEAD(&tbl->walk_head);
	atomic_set(&tbl->entries, 0);
	spin_lock_init(&tbl->gates_lock);
	spin_lock_init(&tbl->walk_lock);

	/* rhashtable_init() may fail only in case of wrong
	 * mesh_rht_params
	 */
	WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

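/* Rewrite a queued frame so it can travel via a mesh gate: add the two
 * extended addresses (Address Extension) if not already present, then
 * point addr1 at the gate path's next hop, addr2 at our own address and
 * addr3 at the new destination.
 */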
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

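/* Look up @dst in @tbl and clear MESH_PATH_ACTIVE if the path has expired.
 * Must be called under the RCU read lock.
 */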
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - mark a mesh path as a gate and add it to the gate list
 * @mpath: gate path to add to table
 *
 * Returns: 0 on success, -EEXIST if @mpath is already marked as a gate
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = &mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 *
 * Returns: The number of gates
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

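/* Allocate and initialize a new mesh path; the caller is responsible for
 * inserting it into one of the path tables.
 */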
static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

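/* Unlink a fast xmit entry from the walk list and the rhashtable and
 * schedule it for freeing after an RCU grace period. Caller must hold
 * cache->walk_lock.
 */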
static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
				    struct ieee80211_mesh_fast_tx *entry)
{
	hlist_del_rcu(&entry->walk_list);
	rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params);
	kfree_rcu(entry, fast_tx.rcu_head);
}

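/**
 * mesh_fast_tx_get - look up a fast xmit entry by destination address
 * @sdata: local subif
 * @addr: destination address of the cached headers
 *
 * Entries whose mesh path is no longer active, or has expired, are dropped
 * on lookup; otherwise the underlying paths are refreshed and the entry is
 * returned.
 *
 * Returns: the fast xmit entry, or NULL if none is usable.
 *
 * Locking: must be called within an RCU read section.
 */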
struct ieee80211_mesh_fast_tx *
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct ieee80211_mesh_fast_tx *entry;
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
	if (!entry)
		return NULL;

	if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
	    mpath_expired(entry->mpath)) {
		spin_lock_bh(&cache->walk_lock);
		entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
		spin_unlock_bh(&cache->walk_lock);
		return NULL;
	}

	mesh_path_refresh(sdata, entry->mpath, NULL);
	if (entry->mppath)
		entry->mppath->exp_time = jiffies;
	entry->timestamp = jiffies;

	return entry;
}

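/**
 * mesh_fast_tx_cache - cache the tx headers of a frame for fast xmit
 * @sdata: local subif
 * @skb: frame with fully built 802.11 and mesh headers
 * @mpath: mesh path the frame is being sent along
 *
 * Builds an ieee80211_mesh_fast_tx entry from the headers of @skb and
 * inserts it into the tx cache, replacing any previous entry for the same
 * destination.
 */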
void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
			struct sk_buff *skb, struct mesh_path *mpath)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_mesh_fast_tx *entry, *prev;
	struct ieee80211_mesh_fast_tx build = {};
	struct ieee80211s_hdr *meshhdr;
	struct mesh_tx_cache *cache;
	struct ieee80211_key *key;
	struct mesh_path *mppath;
	struct sta_info *sta;
	u8 *qc;

	if (sdata->noack_map ||
	    !ieee80211_is_data_qos(hdr->frame_control))
		return;

	build.fast_tx.hdr_len = ieee80211_hdrlen(hdr->frame_control);
	meshhdr = (struct ieee80211s_hdr *)(skb->data + build.fast_tx.hdr_len);
	build.hdrlen = ieee80211_get_mesh_hdrlen(meshhdr);

	cache = &sdata->u.mesh.tx_cache;
	if (atomic_read(&cache->rht.nelems) >= MESH_FAST_TX_CACHE_MAX_SIZE)
		return;

	sta = rcu_dereference(mpath->next_hop);
	if (!sta)
		return;

	if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
		/* This is required to keep the mppath alive */
		mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
		if (!mppath)
			return;
		build.mppath = mppath;
	} else if (ieee80211_has_a4(hdr->frame_control)) {
		mppath = mpath;
	} else {
		return;
	}

	/* rate limit, in case fast xmit can't be enabled */
	if (mppath->fast_tx_check == jiffies)
		return;

	mppath->fast_tx_check = jiffies;

	/*
	 * Same use of the sta lock as in ieee80211_check_fast_xmit, in order
	 * to protect against concurrent sta key updates.
	 */
	spin_lock_bh(&sta->lock);
	key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
	if (!key)
		key = rcu_access_pointer(sdata->default_unicast_key);
	build.fast_tx.key = key;

	if (key) {
		bool gen_iv, iv_spc;

		gen_iv = key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
		iv_spc = key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;

		if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
		    (key->flags & KEY_FLAG_TAINTED))
			goto unlock_sta;

		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_CCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_GCMP_HDR_LEN;
			break;
		default:
			goto unlock_sta;
		}
	}

	memcpy(build.addr_key, mppath->dst, ETH_ALEN);
	build.timestamp = jiffies;
	build.fast_tx.band = info->band;
	build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
	build.fast_tx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
	build.mpath = mpath;
	memcpy(build.hdr, meshhdr, build.hdrlen);
	memcpy(build.hdr + build.hdrlen, rfc1042_header, sizeof(rfc1042_header));
	build.hdrlen += sizeof(rfc1042_header);
	memcpy(build.fast_tx.hdr, hdr, build.fast_tx.hdr_len);

	hdr = (struct ieee80211_hdr *)build.fast_tx.hdr;
	if (build.fast_tx.key)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	qc = ieee80211_get_qos_ctl(hdr);
	qc[1] |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8;

	entry = kmemdup(&build, sizeof(build), GFP_ATOMIC);
	if (!entry)
		goto unlock_sta;

	spin_lock(&cache->walk_lock);
	prev = rhashtable_lookup_get_insert_fast(&cache->rht,
						 &entry->rhash,
						 fast_tx_rht_params);
	if (unlikely(IS_ERR(prev))) {
		kfree(entry);
		goto unlock_cache;
	}

	/*
	 * replace any previous entry in the hash table, in case we're
	 * replacing it with a different type (e.g. mpath -> mpp)
	 */
	if (unlikely(prev)) {
		rhashtable_replace_fast(&cache->rht, &prev->rhash,
					&entry->rhash, fast_tx_rht_params);
		hlist_del_rcu(&prev->walk_list);
		kfree_rcu(prev, fast_tx.rcu_head);
	}

	hlist_add_head(&entry->walk_list, &cache->walk_head);

unlock_cache:
	spin_unlock(&cache->walk_lock);
unlock_sta:
	spin_unlock_bh(&sta->lock);
}

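/* Garbage-collect fast xmit entries that have not been used for
 * MESH_FAST_TX_CACHE_TIMEOUT, once the cache has grown beyond
 * MESH_FAST_TX_CACHE_THRESHOLD_SIZE entries.
 */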
void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
	unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
	struct mesh_tx_cache *cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	cache = &sdata->u.mesh.tx_cache;
	if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
		return;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (!time_is_after_jiffies(entry->timestamp + timeout))
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

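/* Drop all fast xmit entries built on top of the given mesh path. */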
void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (entry->mpath == mpath)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

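/* Drop all fast xmit entries whose next hop is the given station. */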
void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
			    struct sta_info *sta)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (rcu_access_pointer(entry->mpath->next_hop) == sta)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

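/* Drop the fast xmit entry (if any) cached for the given destination. */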
void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
			     const u8 *addr)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;

	spin_lock_bh(&cache->walk_lock);
	entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
	if (entry)
		mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new (or already existing) mesh path on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = &sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

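/**
 * mpp_path_add - add a proxy path for a destination to the mpp table
 * @sdata: local subif
 * @dst: destination address of the proxied node (ETH_ALEN length)
 * @mpp: address of the mesh node proxying @dst
 *
 * Returns: 0 on success, or a negative error code otherwise
 */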
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);

	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = &sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);
	else
		mesh_fast_tx_flush_addr(sdata, dst);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
					   sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, mpath->sn,
					   WLAN_REASON_MESH_PATH_DEST_UNREACHABLE,
					   bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	timer_shutdown_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	if (tbl == &mpath->sdata->u.mesh.mpp_paths)
		mesh_fast_tx_flush_addr(mpath->sdata, mpath->dst);
	else
		mesh_fast_tx_flush_mpath(mpath);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

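/* Delete every proxy path entry that points at the given proxy (gate)
 * address.
 */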
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * @sdata: interface data to match
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(&sdata->u.mesh.mesh_paths);
	table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful, -ENXIO if no matching path was found
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful, -ENXIO if no matching path was found
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, -EHOSTUNREACH if no active gate was found
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = &sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	ieee80211_free_txskb(&sdata->local->hw, skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function; it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	mesh_fast_tx_flush_mpath(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_init(&sdata->u.mesh.mesh_paths);
	mesh_table_init(&sdata->u.mesh.mpp_paths);
	mesh_fast_tx_init(sdata);
}

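/* Expire paths that are not currently being resolved, are not fixed, and
 * whose last refresh was more than MESH_PATH_EXPIRE ago.
 */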
static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_fast_tx_deinit(sdata);
	mesh_table_free(&sdata->u.mesh.mesh_paths);
	mesh_table_free(&sdata->u.mesh.mpp_paths);
}