1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Marvell RVU Ethernet driver |
3 | * |
4 | * Copyright (C) 2023 Marvell. |
5 | * |
6 | */ |
7 | #include <linux/netdevice.h> |
8 | #include <linux/etherdevice.h> |
9 | #include <linux/inetdevice.h> |
10 | #include <linux/bitfield.h> |
11 | |
12 | #include "otx2_common.h" |
13 | #include "cn10k.h" |
14 | #include "qos.h" |
15 | |
16 | #define OTX2_QOS_QID_INNER 0xFFFFU |
17 | #define OTX2_QOS_QID_NONE 0xFFFEU |
18 | #define OTX2_QOS_ROOT_CLASSID 0xFFFFFFFF |
19 | #define OTX2_QOS_CLASS_NONE 0 |
20 | #define OTX2_QOS_DEFAULT_PRIO 0xF |
21 | #define OTX2_QOS_INVALID_SQ 0xFFFF |
22 | #define OTX2_QOS_INVALID_TXSCHQ_IDX 0xFFFF |
23 | #define CN10K_MAX_RR_WEIGHT GENMASK_ULL(13, 0) |
24 | #define OTX2_MAX_RR_QUANTUM GENMASK_ULL(23, 0) |
25 | |
/* Resize the netdev's real TX queue count to the base hardware TX queues
 * plus every currently-allocated QoS leaf send queue.
 */
static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int tx_queues, qos_txqs, err;

	/* One bit per active QoS leaf SQ */
	qos_txqs = bitmap_weight(src: pfvf->qos.qos_sq_bmap,
				 OTX2_QOS_MAX_LEAF_NODES);

	tx_queues = hw->tx_queues + qos_txqs;

	/* Failure is only logged; the previous queue count stays in effect */
	err = netif_set_real_num_tx_queues(dev: pfvf->netdev, txq: tx_queues);
	if (err) {
		netdev_err(dev: pfvf->netdev,
			   format: "Failed to set no of Tx queues: %d\n" , tx_queues);
		return;
	}
}
43 | |
44 | static void otx2_qos_get_regaddr(struct otx2_qos_node *node, |
45 | struct nix_txschq_config *cfg, |
46 | int index) |
47 | { |
48 | if (node->level == NIX_TXSCH_LVL_SMQ) { |
49 | cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq); |
50 | cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq); |
51 | cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq); |
52 | cfg->reg[index] = NIX_AF_MDQX_CIR(node->schq); |
53 | } else if (node->level == NIX_TXSCH_LVL_TL4) { |
54 | cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq); |
55 | cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq); |
56 | cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq); |
57 | cfg->reg[index] = NIX_AF_TL4X_CIR(node->schq); |
58 | } else if (node->level == NIX_TXSCH_LVL_TL3) { |
59 | cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq); |
60 | cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq); |
61 | cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq); |
62 | cfg->reg[index] = NIX_AF_TL3X_CIR(node->schq); |
63 | } else if (node->level == NIX_TXSCH_LVL_TL2) { |
64 | cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq); |
65 | cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq); |
66 | cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq); |
67 | cfg->reg[index] = NIX_AF_TL2X_CIR(node->schq); |
68 | } |
69 | } |
70 | |
71 | static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum) |
72 | { |
73 | u32 weight; |
74 | |
75 | weight = quantum / pfvf->hw.dwrr_mtu; |
76 | if (quantum % pfvf->hw.dwrr_mtu) |
77 | weight += 1; |
78 | |
79 | return weight; |
80 | } |
81 | |
/* Fill register values for @node starting at cfg entry *num_regs, in the
 * exact order otx2_qos_get_regaddr() laid down the addresses:
 * PARENT, SCHEDULE (prio/quantum), PIR, then CIR (if supported).
 * *num_regs is advanced past every entry written.
 */
static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node,
				      struct nix_txschq_config *cfg,
				      int *num_regs)
{
	u32 rr_weight;
	u32 quantum;
	u64 maxrate;

	otx2_qos_get_regaddr(node, cfg, index: *num_regs);

	/* configure parent txschq */
	cfg->regval[*num_regs] = node->parent->schq << 16;
	(*num_regs)++;

	/* configure prio/quantum */
	if (node->qid == OTX2_QOS_QID_NONE) {
		/* Internal node with no SQ attached: default DWRR weight
		 * derived from the max packet length, then done — no shaping.
		 */
		cfg->regval[*num_regs] = node->prio << 24 |
					 mtu_to_dwrr_weight(pfvf, mtu: pfvf->tx_max_pktlen);
		(*num_regs)++;
		return;
	}

	/* configure priority/quantum */
	if (node->is_static) {
		/* Static-priority child: SCHEDULE.PRIO (bits 24+) is the
		 * offset from the parent's prio anchor schq.
		 */
		cfg->regval[*num_regs] =
			(node->schq - node->parent->prio_anchor) << 24;
	} else {
		/* DWRR child: weight from the configured quantum, falling
		 * back to the max packet length when quantum is zero.
		 */
		quantum = node->quantum ?
			  node->quantum : pfvf->tx_max_pktlen;
		rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
		cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
					 rr_weight;
	}
	(*num_regs)++;

	/* configure PIR: peak rate is the larger of rate and ceil */
	maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(nic: pfvf, maxrate, burst: 65536);
	(*num_regs)++;

	/* Don't configure CIR when both CIR+PIR not supported
	 * On 96xx, CIR + PIR + RED_ALGO=STALL causes deadlock
	 */
	if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag))
		return;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(nic: pfvf, maxrate: node->rate, burst: 65536);
	(*num_regs)++;
}
135 | |
/* Build the full register program (cfg->reg/regval/num_regs) for @node
 * according to its scheduler level: SMQ config, optional link config for
 * TL3/TL2, a fixed round-robin SCHEDULE for the root, and the common
 * parent/prio/shaper set via otx2_config_sched_shaping().
 */
static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  struct nix_txschq_config *cfg)
{
	struct otx2_hw *hw = &pfvf->hw;
	int num_regs = 0;
	u8 level;

	level = node->level;

	/* program txschq registers */
	if (level == NIX_TXSCH_LVL_SMQ) {
		/* SMQ config: max pkt len in bits 8+, min MTU in low bits */
		cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq);
		cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) |
					OTX2_MIN_MTU;
		/* Hardware-specific SMQ config fields — see the
		 * NIX_AF_SMQX_CFG register layout for bit meanings.
		 */
		cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) |
					 (0x2ULL << 36);
		num_regs++;

		otx2_config_sched_shaping(pfvf, node, cfg, num_regs: &num_regs);

	} else if (level == NIX_TXSCH_LVL_TL4) {
		otx2_config_sched_shaping(pfvf, node, cfg, num_regs: &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL3) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, num_regs: &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL2) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		/* check if node is root */
		if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
			/* Root TL2 (VF case): fixed RR priority and default
			 * weight; no parent/shaper programming needed.
			 */
			cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
			cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
						mtu_to_dwrr_weight(pfvf,
								   mtu: pfvf->tx_max_pktlen);
			num_regs++;
			goto txschq_cfg_out;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, num_regs: &num_regs);
	}

txschq_cfg_out:
	cfg->num_regs = num_regs;
}
192 | |
/* Program @parent's TOPOLOGY register via the AF mailbox:
 * prio_anchor (bits 32+) and, when the parent carries DWRR children,
 * the round-robin priority (bit 1+). MDQ has no children, so it is a
 * no-op. Returns 0 or a negative errno.
 */
static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
					       struct otx2_qos_node *parent)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *cfg;
	int rc;

	if (parent->level == NIX_TXSCH_LVL_MDQ)
		return 0;

	mutex_lock(&mbox->lock);

	cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox: &pfvf->mbox);
	if (!cfg) {
		mutex_unlock(lock: &mbox->lock);
		return -ENOMEM;
	}

	cfg->lvl = parent->level;

	if (parent->level == NIX_TXSCH_LVL_TL4)
		cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL3)
		cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL2)
		cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL1)
		cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);

	cfg->regval[0] = (u64)parent->prio_anchor << 32;
	/* RR_PRIO is only meaningful when a DWRR priority was assigned */
	cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
			    parent->child_dwrr_prio : 0) << 1;
	cfg->num_regs++;

	rc = otx2_sync_mbox_msg(mbox: &pfvf->mbox);

	mutex_unlock(lock: &mbox->lock);

	return rc;
}
233 | |
/* Release the hardware schqs of @parent's per-level queue chain.
 * The chain was built top level first (MDQ appended last), so the
 * reverse walk frees the MDQ end before the levels above it.
 */
static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node;

	list_for_each_entry_reverse(node, &parent->child_schq_list, list)
		otx2_txschq_free_one(pfvf, lvl: node->level, schq: node->schq);
}
242 | |
/* Recursively release hardware schqs for @parent's subtree:
 * depth-first so each child's own subtree and queue chain are freed
 * before the child's schq itself. Software nodes are left intact.
 */
static void otx2_qos_free_hw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		otx2_qos_free_hw_node(pfvf, parent: node);
		otx2_qos_free_hw_node_schq(pfvf, parent: node);
		otx2_txschq_free_one(pfvf, lvl: node->level, schq: node->schq);
	}
}
254 | |
/* Release every hardware schq mapped under @node (children first),
 * then @node's own schq, under the qos tree lock.
 */
static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf,
				 struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	/* free child node hw mappings */
	otx2_qos_free_hw_node(pfvf, parent: node);
	otx2_qos_free_hw_node_schq(pfvf, parent: node);

	/* free node hw mappings */
	otx2_txschq_free_one(pfvf, lvl: node->level, schq: node->schq);

	mutex_unlock(lock: &pfvf->qos.qos_lock);
}
269 | |
/* Unlink a software node: drop it from the classid hash, return its
 * qid to the SQ bitmap (leaf nodes only), shrink the netdev TX queue
 * count, and free the node.
 *
 * NOTE(review): the node is kfree'd immediately after hash_del_rcu();
 * RCU readers (otx2_sw_node_find_rcu) could still hold a reference —
 * confirm callers guarantee no concurrent lookups or add a grace period.
 */
static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	hash_del_rcu(node: &node->hlist);

	/* Only real leaves own a qid/SQ; INNER and NONE are sentinels */
	if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
		__clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
		otx2_qos_update_tx_netdev_queues(pfvf);
	}

	list_del(entry: &node->list);
	kfree(objp: node);
}
283 | |
/* Free the software objects of @parent's per-level queue chain.
 * These nodes are never in the classid hash, so plain unlink + kfree.
 */
static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) {
		list_del(entry: &node->list);
		kfree(objp: node);
	}
}
294 | |
/* Recursively free the software subtree below @parent, depth-first:
 * each child's descendants and queue chain go before the child itself.
 * Caller holds qos_lock.
 */
static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				    struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		__otx2_qos_free_sw_node(pfvf, parent: node);
		otx2_qos_free_sw_node_schq(pfvf, parent: node);
		otx2_qos_sw_node_delete(pfvf, node);
	}
}
306 | |
/* Free @node and its entire software subtree under the qos tree lock. */
static void otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	__otx2_qos_free_sw_node(pfvf, parent: node);
	otx2_qos_free_sw_node_schq(pfvf, parent: node);
	otx2_qos_sw_node_delete(pfvf, node);

	mutex_unlock(lock: &pfvf->qos.qos_lock);
}
318 | |
/* Tear down @node completely: hardware schq mappings first, then the
 * software tree objects.
 */
static void otx2_qos_destroy_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	otx2_qos_free_hw_cfg(pfvf, node);
	otx2_qos_free_sw_node(pfvf, node);
}
325 | |
/* Count one non-contiguous schq request per level for every node in
 * @parent's per-level queue chain.
 */
static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent,
				   struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_schq_list, list)
		cfg->schq[node->level]++;
}
334 | |
/* Recursively total the schq allocation request for @parent's subtree.
 * Children live one level below the parent, hence the level - 1 index;
 * the contiguous request must cover every DWRR child plus one slot per
 * static priority up to the highest used.
 */
static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
				 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_fill_cfg_tl(parent: node, cfg);
		otx2_qos_fill_cfg_schq(parent: node, cfg);
	}

	/* Assign the required number of transmit schedular queues under the
	 * given class
	 */
	cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
					       parent->max_static_prio + 1;
}
351 | |
/* Compute the schq allocation request for @parent's subtree under the
 * qos tree lock.
 */
static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
					struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_fill_cfg_tl(parent, cfg);
	mutex_unlock(lock: &pfvf->qos.qos_lock);
}
360 | |
361 | static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent, |
362 | struct otx2_qos_cfg *cfg) |
363 | { |
364 | struct otx2_qos_node *node; |
365 | int cnt; |
366 | |
367 | list_for_each_entry(node, &parent->child_schq_list, list) { |
368 | cnt = cfg->dwrr_node_pos[node->level]; |
369 | cfg->schq_list[node->level][cnt] = node->schq; |
370 | cfg->schq[node->level]++; |
371 | cfg->dwrr_node_pos[node->level]++; |
372 | } |
373 | } |
374 | |
/* Recursively snapshot the current hardware schqs of @parent's subtree
 * into cfg (contiguous lists for TL children, regular lists for the
 * per-level queue chains), marking every captured index as used.
 * Caller holds qos_lock.
 */
static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_read_txschq_cfg_tl(parent: node, cfg);
		cnt = cfg->static_node_pos[node->level];
		cfg->schq_contig_list[node->level][cnt] = node->schq;
		cfg->schq_index_used[node->level][cnt] = true;
		cfg->schq_contig[node->level]++;
		cfg->static_node_pos[node->level]++;
		otx2_qos_read_txschq_cfg_schq(parent: node, cfg);
	}
}
391 | |
/* Snapshot the hardware schqs of @node's subtree into @cfg under the
 * qos tree lock.
 */
static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_read_txschq_cfg_tl(parent: node, cfg);
	mutex_unlock(lock: &pfvf->qos.qos_lock);
}
400 | |
/* Allocate and register the HTB root node. A PF roots at TL1; a VF
 * roots at TL2 (its TL1 is owned by the PF) and starts with the default
 * DWRR priority. Returns the node or ERR_PTR(-ENOMEM).
 */
static struct otx2_qos_node *
otx2_qos_alloc_root(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *node;

	node = kzalloc(size: sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(error: -ENOMEM);

	node->parent = NULL;
	if (!is_otx2_vf(pcifunc: pfvf->pcifunc)) {
		node->level = NIX_TXSCH_LVL_TL1;
	} else {
		node->level = NIX_TXSCH_LVL_TL2;
		node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	}

	/* Root is an inner node: it never owns a send queue */
	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
	node->classid = OTX2_QOS_ROOT_CLASSID;

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid);
	list_add_tail(new: &node->list, head: &pfvf->qos.qos_tree);
	INIT_LIST_HEAD(list: &node->child_list);
	INIT_LIST_HEAD(list: &node->child_schq_list);

	return node;
}
428 | |
/* Insert @node into @parent's child list, keeping the list sorted by
 * ascending priority. Two static children may not share a priority
 * (-EEXIST); the parent's max_static_prio tracks the highest prio seen.
 * Returns 0 on success.
 */
static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
				   struct otx2_qos_node *node)
{
	struct list_head *head = &parent->child_list;
	struct otx2_qos_node *tmp_node;
	struct list_head *tmp;

	if (node->prio > parent->max_static_prio)
		parent->max_static_prio = node->prio;

	for (tmp = head->next; tmp != head; tmp = tmp->next) {
		tmp_node = list_entry(tmp, struct otx2_qos_node, list);
		/* duplicate static priority under one parent is invalid */
		if (tmp_node->prio == node->prio &&
		    tmp_node->is_static)
			return -EEXIST;
		/* first higher-prio entry marks the insertion point */
		if (tmp_node->prio > node->prio) {
			list_add_tail(new: &node->list, head: tmp);
			return 0;
		}
	}

	/* highest priority so far: append at the end */
	list_add_tail(new: &node->list, head);
	return 0;
}
453 | |
/* Build @node's per-level software queue chain: one placeholder node for
 * every scheduler level from just below @node down to MDQ, each chained
 * to the previous as its parent and appended to node->child_schq_list.
 * On allocation failure the partially built chain is torn down.
 * Returns 0 or -ENOMEM.
 */
static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node)
{
	struct otx2_qos_node *txschq_node, *parent, *tmp;
	int lvl;

	parent = node;
	for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) {
		txschq_node = kzalloc(size: sizeof(*txschq_node), GFP_KERNEL);
		if (!txschq_node)
			goto err_out;

		txschq_node->parent = parent;
		txschq_node->level = lvl;
		/* placeholder: no classid/qid, default static pass-through */
		txschq_node->classid = OTX2_QOS_CLASS_NONE;
		WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);
		txschq_node->rate = 0;
		txschq_node->ceil = 0;
		txschq_node->prio = 0;
		txschq_node->quantum = 0;
		txschq_node->is_static = true;
		txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

		mutex_lock(&pfvf->qos.qos_lock);
		list_add_tail(new: &txschq_node->list, head: &node->child_schq_list);
		mutex_unlock(lock: &pfvf->qos.qos_lock);

		INIT_LIST_HEAD(list: &txschq_node->child_list);
		INIT_LIST_HEAD(list: &txschq_node->child_schq_list);
		parent = txschq_node;
	}

	return 0;

err_out:
	/* unwind every chain node added so far */
	list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list,
				 list) {
		list_del(entry: &txschq_node->list);
		kfree(objp: txschq_node);
	}
	return -ENOMEM;
}
497 | |
/* Allocate a leaf software node for @classid one level below @parent,
 * claim its qid in the SQ bitmap, publish it in the classid hash, link
 * it into the parent's sorted child list, and build its per-level queue
 * chain. Returns the node or ERR_PTR on failure.
 *
 * NOTE(review): if otx2_qos_add_child_node() fails, the node remains in
 * the hash and the qid bit stays set while the node leaks — confirm the
 * caller cleans this up or whether this path needs its own unwind.
 */
static struct otx2_qos_node *
otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
			     struct otx2_qos_node *parent,
			     u16 classid, u32 prio, u64 rate, u64 ceil,
			     u32 quantum, u16 qid, bool static_cfg)
{
	struct otx2_qos_node *node;
	int err;

	node = kzalloc(size: sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(error: -ENOMEM);

	node->parent = parent;
	node->level = parent->level - 1;
	node->classid = classid;
	WRITE_ONCE(node->qid, qid);

	node->rate = otx2_convert_rate(rate);
	node->ceil = otx2_convert_rate(rate: ceil);
	node->prio = prio;
	node->quantum = quantum;
	node->is_static = static_cfg;
	node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	/* reserve the send queue slot for this leaf */
	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid);

	mutex_lock(&pfvf->qos.qos_lock);
	err = otx2_qos_add_child_node(parent, node);
	if (err) {
		mutex_unlock(lock: &pfvf->qos.qos_lock);
		return ERR_PTR(error: err);
	}
	mutex_unlock(lock: &pfvf->qos.qos_lock);

	INIT_LIST_HEAD(list: &node->child_list);
	INIT_LIST_HEAD(list: &node->child_schq_list);

	err = otx2_qos_alloc_txschq_node(pfvf, node);
	if (err) {
		/* removes the hash entry, clears the qid bit, frees node */
		otx2_qos_sw_node_delete(pfvf, node);
		return ERR_PTR(error: -ENOMEM);
	}

	return node;
}
547 | |
548 | static struct otx2_qos_node * |
549 | otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid) |
550 | { |
551 | struct otx2_qos_node *node = NULL; |
552 | |
553 | hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) { |
554 | if (node->classid == classid) |
555 | break; |
556 | } |
557 | |
558 | return node; |
559 | } |
560 | |
561 | static struct otx2_qos_node * |
562 | otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid) |
563 | { |
564 | struct otx2_qos_node *node = NULL; |
565 | |
566 | hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) { |
567 | if (node->classid == classid) |
568 | break; |
569 | } |
570 | |
571 | return node; |
572 | } |
573 | |
574 | int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid) |
575 | { |
576 | struct otx2_qos_node *node; |
577 | u16 qid; |
578 | int res; |
579 | |
580 | node = otx2_sw_node_find_rcu(pfvf, classid); |
581 | if (!node) { |
582 | res = -ENOENT; |
583 | goto out; |
584 | } |
585 | qid = READ_ONCE(node->qid); |
586 | if (qid == OTX2_QOS_QID_INNER) { |
587 | res = -EINVAL; |
588 | goto out; |
589 | } |
590 | res = pfvf->hw.tx_queues + qid; |
591 | out: |
592 | return res; |
593 | } |
594 | |
/* Push @node's full register program to the AF over the mailbox.
 * Returns 0 or a negative errno.
 */
static int
otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *req;
	int rc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox: &pfvf->mbox);
	if (!req) {
		mutex_unlock(lock: &mbox->lock);
		return -ENOMEM;
	}

	req->lvl = node->level;
	/* fills req->reg/regval/num_regs for this node's level */
	__otx2_qos_txschq_cfg(pfvf, node, cfg: req);

	rc = otx2_sync_mbox_msg(mbox: &pfvf->mbox);

	mutex_unlock(lock: &mbox->lock);

	return rc;
}
619 | |
/* Request the schqs described by @cfg (per-level regular and contiguous
 * counts) from the AF, then copy the granted schq numbers back into
 * cfg->schq_list / cfg->schq_contig_list. Also records the level that
 * needs link configuration and the aggregate-level RR priority from the
 * response. Returns 0 or a negative errno.
 */
static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
				 struct otx2_qos_cfg *cfg)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	struct mbox *mbox = &pfvf->mbox;
	int lvl, rc, schq;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox: &pfvf->mbox);
	if (!req) {
		mutex_unlock(lock: &mbox->lock);
		return -ENOMEM;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		req->schq[lvl] = cfg->schq[lvl];
		req->schq_contig[lvl] = cfg->schq_contig[lvl];
	}

	rc = otx2_sync_mbox_msg(mbox: &pfvf->mbox);
	if (rc) {
		mutex_unlock(lock: &mbox->lock);
		return rc;
	}

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(mbox: &pfvf->mbox.mbox, devid: 0, msg: &req->hdr);

	if (IS_ERR(ptr: rsp)) {
		rc = PTR_ERR(ptr: rsp);
		goto out;
	}

	/* copy granted contiguous schqs (used for prio-anchored children) */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) {
			cfg->schq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}
	}

	/* copy granted regular schqs (per-level queue chains) */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq[lvl]; schq++) {
			cfg->schq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
		}
	}

	pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
	pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;

out:
	mutex_unlock(lock: &mbox->lock);
	return rc;
}
675 | |
676 | static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf, |
677 | struct otx2_qos_cfg *cfg) |
678 | { |
679 | int lvl, idx, schq; |
680 | |
681 | for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { |
682 | for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) { |
683 | if (!cfg->schq_index_used[lvl][idx]) { |
684 | schq = cfg->schq_contig_list[lvl][idx]; |
685 | otx2_txschq_free_one(pfvf, lvl, schq); |
686 | } |
687 | } |
688 | } |
689 | } |
690 | |
/* Hand out regular (non-contiguous) schqs from cfg->schq_list to the
 * nodes of @node's per-level queue chain, in per-level allocation order.
 */
static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
					  struct otx2_qos_node *node,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[tmp->level];
		tmp->schq = cfg->schq_list[tmp->level][cnt];
		cfg->dwrr_node_pos[tmp->level]++;
	}
}
704 | |
/* Recursively assign contiguous schqs to @node's subtree: each child
 * takes the contiguous schq at its precomputed txschq_idx and the index
 * is marked used. The parent's prio_anchor is the first schq of the
 * contiguous range at the children's level. Caller holds qos_lock.
 */
static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
					struct otx2_qos_node *node,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_list, list) {
		otx2_qos_txschq_fill_cfg_tl(pfvf, node: tmp, cfg);
		cnt = cfg->static_node_pos[tmp->level];
		tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
		cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
		/* anchor set once, on the first child seen at this level */
		if (cnt == 0)
			node->prio_anchor =
				cfg->schq_contig_list[tmp->level][0];
		cfg->static_node_pos[tmp->level]++;
		otx2_qos_txschq_fill_cfg_schq(pfvf, node: tmp, cfg);
	}
}
724 | |
/* Distribute the schqs granted in @cfg across @node's subtree and its
 * queue chains, then return any unbound contiguous schqs to the AF.
 * All under the qos tree lock.
 */
static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
	otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
	otx2_qos_free_unused_txschq(pfvf, cfg);
	mutex_unlock(lock: &pfvf->qos.qos_lock);
}
735 | |
/* Pick a contiguous-range slot for @tmp within its parent's child index
 * space (child_idx_bmap of @child_cnt bits). A static child must get the
 * slot equal to its priority (1:1 mapping); a DWRR child takes the first
 * free slot at or above its priority. Nodes that already hold a slot are
 * left alone; if no rule matches, the node keeps the invalid index.
 */
static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
					  struct otx2_qos_node *tmp,
					  unsigned long *child_idx_bmap,
					  int child_cnt)
{
	int idx;

	if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
		return;

	/* assign static nodes 1:1 prio mapping first, then remaining nodes */
	for (idx = 0; idx < child_cnt; idx++) {
		if (tmp->is_static && tmp->prio == idx &&
		    !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(nr: idx, addr: child_idx_bmap);
			return;
		} else if (!tmp->is_static && idx >= tmp->prio &&
			   !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(nr: idx, addr: child_idx_bmap);
			return;
		}
	}
}
761 | |
/* Assign every child of @node a slot in the contiguous schq range:
 * reset all child indices, then place static-priority children first
 * (they need exact prio slots) and DWRR children in the remaining
 * slots. A temporary bitmap of child_dwrr_cnt + max_static_prio + 1
 * bits tracks occupancy. Returns 0 or -ENOMEM.
 */
static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node)
{
	unsigned long *child_idx_bmap;
	struct otx2_qos_node *tmp;
	int child_cnt;

	list_for_each_entry(tmp, &node->child_list, list)
		tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	/* allocate child index array */
	child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
	child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
				 size: sizeof(unsigned long),
				 GFP_KERNEL);
	if (!child_idx_bmap)
		return -ENOMEM;

	/* recurse before placing this level's children */
	list_for_each_entry(tmp, &node->child_list, list)
		otx2_qos_assign_base_idx_tl(pfvf, node: tmp);

	/* assign base index of static priority children first */
	list_for_each_entry(tmp, &node->child_list, list) {
		if (!tmp->is_static)
			continue;
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);
	}

	/* assign base index of dwrr priority children */
	list_for_each_entry(tmp, &node->child_list, list)
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);

	kfree(objp: child_idx_bmap);

	return 0;
}
800 | |
801 | static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf, |
802 | struct otx2_qos_node *node) |
803 | { |
804 | int ret = 0; |
805 | |
806 | mutex_lock(&pfvf->qos.qos_lock); |
807 | ret = otx2_qos_assign_base_idx_tl(pfvf, node); |
808 | mutex_unlock(lock: &pfvf->qos.qos_lock); |
809 | |
810 | return ret; |
811 | } |
812 | |
/* Program every node of @node's per-level queue chain into hardware and
 * update each one's parent TOPOLOGY register. Any mailbox failure is
 * reported as -EIO. Caller holds qos_lock.
 */
static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		ret = otx2_qos_txschq_config(pfvf, node: tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_set_parent_topology(pfvf, parent: tmp->parent);
		if (ret)
			return -EIO;
	}

	return 0;
}
831 | |
/* Recursively program @node's subtree into hardware, depth-first:
 * each child's subtree, then the child itself, then its queue chain;
 * finally @node's own TOPOLOGY register. Returns 0 or -EIO.
 * Caller holds qos_lock.
 */
static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node,
				       struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_list, list) {
		ret = otx2_qos_txschq_push_cfg_tl(pfvf, node: tmp, cfg);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_config(pfvf, node: tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_push_cfg_schq(pfvf, node: tmp, cfg);
		if (ret)
			return -EIO;
	}

	ret = otx2_qos_txschq_set_parent_topology(pfvf, parent: node);
	if (ret)
		return -EIO;

	return 0;
}
857 | |
/* Program @node's whole subtree and queue chain into hardware under the
 * qos tree lock. Returns 0 or -EIO.
 */
static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg);
	if (ret)
		goto out;
	ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg);
out:
	mutex_unlock(lock: &pfvf->qos.qos_lock);
	return ret;
}
873 | |
/* Bind granted schqs to @node's subtree, then program the result into
 * hardware. Returns 0 or -EIO.
 */
static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	otx2_qos_txschq_fill_cfg(pfvf, node, cfg);

	return otx2_qos_txschq_push_cfg(pfvf, node, cfg);
}
882 | |
/* Bind the single schq granted at the root level to @root and program
 * it into hardware. Returns 0 or a negative errno.
 */
static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf,
					   struct otx2_qos_node *root,
					   struct otx2_qos_cfg *cfg)
{
	root->schq = cfg->schq_list[root->level][0];
	return otx2_qos_txschq_config(pfvf, node: root);
}
890 | |
891 | static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg) |
892 | { |
893 | int lvl, idx, schq; |
894 | |
895 | for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { |
896 | for (idx = 0; idx < cfg->schq[lvl]; idx++) { |
897 | schq = cfg->schq_list[lvl][idx]; |
898 | otx2_txschq_free_one(pfvf, lvl, schq); |
899 | } |
900 | } |
901 | |
902 | for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { |
903 | for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) { |
904 | if (cfg->schq_index_used[lvl][idx]) { |
905 | schq = cfg->schq_contig_list[lvl][idx]; |
906 | otx2_txschq_free_one(pfvf, lvl, schq); |
907 | } |
908 | } |
909 | } |
910 | } |
911 | |
/* (Re)attach send queue @qid to @node's schq: disable the SQ if it was
 * previously mapped, record the new qid→schq mapping, then enable it.
 */
static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
			       struct otx2_qos_node *node,
			       u16 qid)
{
	if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ)
		otx2_qos_disable_sq(pfvf, qidx: qid);

	pfvf->qos.qid_to_sqmap[qid] = node->schq;
	otx2_qos_enable_sq(pfvf, qidx: qid);
}
922 | |
/* Apply @action to the MDQ at the bottom of @node's queue chain:
 * QOS_SMQ_FLUSH flushes the SMQ, otherwise the node's SQ is
 * (re)configured against it. Inner nodes (no SQ) are skipped.
 */
static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     bool action)
{
	struct otx2_qos_node *tmp;

	if (node->qid == OTX2_QOS_QID_INNER)
		return;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, smq: tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, node: tmp, qid: node->qid);
		}
	}
}
941 | |
/* Recursively apply @action (flush or SQ config) to every leaf below
 * @node: leaves already at MDQ level are handled directly, others via
 * their queue chain. Inner nodes are recursed into but not acted on.
 * Caller holds qos_lock.
 */
static void __otx2_qos_update_smq(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  bool action)
{
	struct otx2_qos_node *tmp;

	list_for_each_entry(tmp, &node->child_list, list) {
		__otx2_qos_update_smq(pfvf, node: tmp, action);
		if (tmp->qid == OTX2_QOS_QID_INNER)
			continue;
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, smq: tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, node: tmp, qid: tmp->qid);
		} else {
			otx2_qos_update_smq_schq(pfvf, node: tmp, action);
		}
	}
}
962 | |
/* Apply @action to @node's subtree and to @node's own queue chain,
 * under the qos tree lock.
 */
static void otx2_qos_update_smq(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				bool action)
{
	mutex_lock(&pfvf->qos.qos_lock);
	__otx2_qos_update_smq(pfvf, node, action);
	otx2_qos_update_smq_schq(pfvf, node, action);
	mutex_unlock(lock: &pfvf->qos.qos_lock);
}
972 | |
/* Allocate schqs for @node's subtree, assign slot indices, and — if the
 * interface is up — program hardware and enable the send queues. When
 * the interface is down only the software binding is done; hardware is
 * programmed at the next open. Returns 0 or a negative errno.
 *
 * NOTE(review): if otx2_qos_assign_base_idx() fails, the schqs just
 * granted by otx2_qos_txschq_alloc() are not returned to the AF on this
 * path — confirm the caller frees @cfg, or whether this leaks.
 */
static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	ret = otx2_qos_txschq_alloc(pfvf, cfg);
	if (ret)
		return -ENOSPC;

	ret = otx2_qos_assign_base_idx(pfvf, node);
	if (ret)
		return -ENOMEM;

	if (!(pfvf->netdev->flags & IFF_UP)) {
		/* interface down: bind schqs in software only */
		otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
		return 0;
	}

	ret = otx2_qos_txschq_update_config(pfvf, node, cfg);
	if (ret) {
		otx2_qos_free_cfg(pfvf, cfg);
		return -EIO;
	}

	/* attach/enable the send queues of every leaf in the subtree */
	otx2_qos_update_smq(pfvf, node, action: QOS_CFG_SQ);

	return 0;
}
1002 | |
1003 | static int otx2_qos_update_tree(struct otx2_nic *pfvf, |
1004 | struct otx2_qos_node *node, |
1005 | struct otx2_qos_cfg *cfg) |
1006 | { |
1007 | otx2_qos_prepare_txschq_cfg(pfvf, parent: node->parent, cfg); |
1008 | return otx2_qos_push_txschq_cfg(pfvf, node: node->parent, cfg); |
1009 | } |
1010 | |
1011 | static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls, |
1012 | struct netlink_ext_ack *extack) |
1013 | { |
1014 | struct otx2_qos_cfg *new_cfg; |
1015 | struct otx2_qos_node *root; |
1016 | int err; |
1017 | |
1018 | netdev_dbg(pfvf->netdev, |
1019 | "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n" , |
1020 | htb_maj_id, htb_defcls); |
1021 | |
1022 | root = otx2_qos_alloc_root(pfvf); |
1023 | if (IS_ERR(ptr: root)) { |
1024 | err = PTR_ERR(ptr: root); |
1025 | return err; |
1026 | } |
1027 | |
1028 | /* allocate txschq queue */ |
1029 | new_cfg = kzalloc(size: sizeof(*new_cfg), GFP_KERNEL); |
1030 | if (!new_cfg) { |
1031 | NL_SET_ERR_MSG_MOD(extack, "Memory allocation error" ); |
1032 | err = -ENOMEM; |
1033 | goto free_root_node; |
1034 | } |
1035 | /* allocate htb root node */ |
1036 | new_cfg->schq[root->level] = 1; |
1037 | err = otx2_qos_txschq_alloc(pfvf, cfg: new_cfg); |
1038 | if (err) { |
1039 | NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq" ); |
1040 | goto free_root_node; |
1041 | } |
1042 | |
1043 | /* Update TL1 RR PRIO */ |
1044 | if (root->level == NIX_TXSCH_LVL_TL1) { |
1045 | root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio; |
1046 | netdev_dbg(pfvf->netdev, |
1047 | "TL1 DWRR Priority %d\n" , root->child_dwrr_prio); |
1048 | } |
1049 | |
1050 | if (!(pfvf->netdev->flags & IFF_UP) || |
1051 | root->level == NIX_TXSCH_LVL_TL1) { |
1052 | root->schq = new_cfg->schq_list[root->level][0]; |
1053 | goto out; |
1054 | } |
1055 | |
1056 | /* update the txschq configuration in hw */ |
1057 | err = otx2_qos_txschq_update_root_cfg(pfvf, root, cfg: new_cfg); |
1058 | if (err) { |
1059 | NL_SET_ERR_MSG_MOD(extack, |
1060 | "Error updating txschq configuration" ); |
1061 | goto txschq_free; |
1062 | } |
1063 | |
1064 | out: |
1065 | WRITE_ONCE(pfvf->qos.defcls, htb_defcls); |
1066 | /* Pairs with smp_load_acquire() in ndo_select_queue */ |
1067 | smp_store_release(&pfvf->qos.maj_id, htb_maj_id); |
1068 | kfree(objp: new_cfg); |
1069 | return 0; |
1070 | |
1071 | txschq_free: |
1072 | otx2_qos_free_cfg(pfvf, cfg: new_cfg); |
1073 | free_root_node: |
1074 | kfree(objp: new_cfg); |
1075 | otx2_qos_sw_node_delete(pfvf, node: root); |
1076 | return err; |
1077 | } |
1078 | |
1079 | static int otx2_qos_root_destroy(struct otx2_nic *pfvf) |
1080 | { |
1081 | struct otx2_qos_node *root; |
1082 | |
1083 | netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n" ); |
1084 | |
1085 | /* find root node */ |
1086 | root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID); |
1087 | if (!root) |
1088 | return -ENOENT; |
1089 | |
1090 | /* free the hw mappings */ |
1091 | otx2_qos_destroy_node(pfvf, node: root); |
1092 | |
1093 | return 0; |
1094 | } |
1095 | |
1096 | static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum) |
1097 | { |
1098 | u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum); |
1099 | int err = 0; |
1100 | |
1101 | /* Max Round robin weight supported by octeontx2 and CN10K |
1102 | * is different. Validate accordingly |
1103 | */ |
1104 | if (is_dev_otx2(pdev: pfvf->pdev)) |
1105 | err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0; |
1106 | else if (rr_weight > CN10K_MAX_RR_WEIGHT) |
1107 | err = -EINVAL; |
1108 | |
1109 | return err; |
1110 | } |
1111 | |
1112 | static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent, |
1113 | struct netlink_ext_ack *extack, |
1114 | struct otx2_nic *pfvf, |
1115 | u64 prio, u64 quantum) |
1116 | { |
1117 | int err; |
1118 | |
1119 | err = otx2_qos_validate_quantum(pfvf, quantum); |
1120 | if (err) { |
1121 | NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value" ); |
1122 | return err; |
1123 | } |
1124 | |
1125 | if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) { |
1126 | parent->child_dwrr_prio = prio; |
1127 | } else if (prio != parent->child_dwrr_prio) { |
1128 | NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed" ); |
1129 | return -EOPNOTSUPP; |
1130 | } |
1131 | |
1132 | return 0; |
1133 | } |
1134 | |
1135 | static int otx2_qos_validate_configuration(struct otx2_qos_node *parent, |
1136 | struct netlink_ext_ack *extack, |
1137 | struct otx2_nic *pfvf, |
1138 | u64 prio, bool static_cfg) |
1139 | { |
1140 | if (prio == parent->child_dwrr_prio && static_cfg) { |
1141 | NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists" ); |
1142 | return -EEXIST; |
1143 | } |
1144 | |
1145 | if (static_cfg && test_bit(prio, parent->prio_bmap)) { |
1146 | NL_SET_ERR_MSG_MOD(extack, |
1147 | "Static priority child with same priority exists" ); |
1148 | return -EEXIST; |
1149 | } |
1150 | |
1151 | return 0; |
1152 | } |
1153 | |
1154 | static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio) |
1155 | { |
1156 | /* For PF, root node dwrr priority is static */ |
1157 | if (parent->level == NIX_TXSCH_LVL_TL1) |
1158 | return; |
1159 | |
1160 | if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) { |
1161 | parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; |
1162 | clear_bit(nr: prio, addr: parent->prio_bmap); |
1163 | } |
1164 | } |
1165 | |
1166 | static bool is_qos_node_dwrr(struct otx2_qos_node *parent, |
1167 | struct otx2_nic *pfvf, |
1168 | u64 prio) |
1169 | { |
1170 | struct otx2_qos_node *node; |
1171 | bool ret = false; |
1172 | |
1173 | if (parent->child_dwrr_prio == prio) |
1174 | return true; |
1175 | |
1176 | mutex_lock(&pfvf->qos.qos_lock); |
1177 | list_for_each_entry(node, &parent->child_list, list) { |
1178 | if (prio == node->prio) { |
1179 | if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO && |
1180 | parent->child_dwrr_prio != prio) |
1181 | continue; |
1182 | |
1183 | if (otx2_qos_validate_quantum(pfvf, quantum: node->quantum)) { |
1184 | netdev_err(dev: pfvf->netdev, |
1185 | format: "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d" , |
1186 | node->classid, node->quantum, |
1187 | node->prio); |
1188 | break; |
1189 | } |
1190 | /* mark old node as dwrr */ |
1191 | node->is_static = false; |
1192 | parent->child_dwrr_cnt++; |
1193 | parent->child_static_cnt--; |
1194 | ret = true; |
1195 | break; |
1196 | } |
1197 | } |
1198 | mutex_unlock(lock: &pfvf->qos.qos_lock); |
1199 | |
1200 | return ret; |
1201 | } |
1202 | |
1203 | static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, |
1204 | u32 parent_classid, u64 rate, u64 ceil, |
1205 | u64 prio, u32 quantum, |
1206 | struct netlink_ext_ack *extack) |
1207 | { |
1208 | struct otx2_qos_cfg *old_cfg, *new_cfg; |
1209 | struct otx2_qos_node *node, *parent; |
1210 | int qid, ret, err; |
1211 | bool static_cfg; |
1212 | |
1213 | netdev_dbg(pfvf->netdev, |
1214 | "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n" , |
1215 | classid, parent_classid, rate, ceil, prio, quantum); |
1216 | |
1217 | if (prio > OTX2_QOS_MAX_PRIO) { |
1218 | NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7" ); |
1219 | ret = -EOPNOTSUPP; |
1220 | goto out; |
1221 | } |
1222 | |
1223 | if (!quantum || quantum > INT_MAX) { |
1224 | NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes" ); |
1225 | ret = -EOPNOTSUPP; |
1226 | goto out; |
1227 | } |
1228 | |
1229 | /* get parent node */ |
1230 | parent = otx2_sw_node_find(pfvf, classid: parent_classid); |
1231 | if (!parent) { |
1232 | NL_SET_ERR_MSG_MOD(extack, "parent node not found" ); |
1233 | ret = -ENOENT; |
1234 | goto out; |
1235 | } |
1236 | if (parent->level == NIX_TXSCH_LVL_MDQ) { |
1237 | NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached" ); |
1238 | ret = -EOPNOTSUPP; |
1239 | goto out; |
1240 | } |
1241 | |
1242 | static_cfg = !is_qos_node_dwrr(parent, pfvf, prio); |
1243 | ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio, |
1244 | static_cfg); |
1245 | if (ret) |
1246 | goto out; |
1247 | |
1248 | if (!static_cfg) { |
1249 | ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio, |
1250 | quantum); |
1251 | if (ret) |
1252 | goto out; |
1253 | } |
1254 | |
1255 | if (static_cfg) |
1256 | parent->child_static_cnt++; |
1257 | else |
1258 | parent->child_dwrr_cnt++; |
1259 | |
1260 | set_bit(nr: prio, addr: parent->prio_bmap); |
1261 | |
1262 | /* read current txschq configuration */ |
1263 | old_cfg = kzalloc(size: sizeof(*old_cfg), GFP_KERNEL); |
1264 | if (!old_cfg) { |
1265 | NL_SET_ERR_MSG_MOD(extack, "Memory allocation error" ); |
1266 | ret = -ENOMEM; |
1267 | goto reset_prio; |
1268 | } |
1269 | otx2_qos_read_txschq_cfg(pfvf, node: parent, cfg: old_cfg); |
1270 | |
1271 | /* allocate a new sq */ |
1272 | qid = otx2_qos_get_qid(pfvf); |
1273 | if (qid < 0) { |
1274 | NL_SET_ERR_MSG_MOD(extack, "Reached max supported QOS SQ's" ); |
1275 | ret = -ENOMEM; |
1276 | goto free_old_cfg; |
1277 | } |
1278 | |
1279 | /* Actual SQ mapping will be updated after SMQ alloc */ |
1280 | pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; |
1281 | |
1282 | /* allocate and initialize a new child node */ |
1283 | node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate, |
1284 | ceil, quantum, qid, static_cfg); |
1285 | if (IS_ERR(ptr: node)) { |
1286 | NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node" ); |
1287 | ret = PTR_ERR(ptr: node); |
1288 | goto free_old_cfg; |
1289 | } |
1290 | |
1291 | /* push new txschq config to hw */ |
1292 | new_cfg = kzalloc(size: sizeof(*new_cfg), GFP_KERNEL); |
1293 | if (!new_cfg) { |
1294 | NL_SET_ERR_MSG_MOD(extack, "Memory allocation error" ); |
1295 | ret = -ENOMEM; |
1296 | goto free_node; |
1297 | } |
1298 | ret = otx2_qos_update_tree(pfvf, node, cfg: new_cfg); |
1299 | if (ret) { |
1300 | NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error" ); |
1301 | kfree(objp: new_cfg); |
1302 | otx2_qos_sw_node_delete(pfvf, node); |
1303 | /* restore the old qos tree */ |
1304 | err = otx2_qos_txschq_update_config(pfvf, node: parent, cfg: old_cfg); |
1305 | if (err) { |
1306 | netdev_err(dev: pfvf->netdev, |
1307 | format: "Failed to restore txcshq configuration" ); |
1308 | goto free_old_cfg; |
1309 | } |
1310 | |
1311 | otx2_qos_update_smq(pfvf, node: parent, action: QOS_CFG_SQ); |
1312 | goto free_old_cfg; |
1313 | } |
1314 | |
1315 | /* update tx_real_queues */ |
1316 | otx2_qos_update_tx_netdev_queues(pfvf); |
1317 | |
1318 | /* free new txschq config */ |
1319 | kfree(objp: new_cfg); |
1320 | |
1321 | /* free old txschq config */ |
1322 | otx2_qos_free_cfg(pfvf, cfg: old_cfg); |
1323 | kfree(objp: old_cfg); |
1324 | |
1325 | return pfvf->hw.tx_queues + qid; |
1326 | |
1327 | free_node: |
1328 | otx2_qos_sw_node_delete(pfvf, node); |
1329 | free_old_cfg: |
1330 | kfree(objp: old_cfg); |
1331 | reset_prio: |
1332 | if (static_cfg) |
1333 | parent->child_static_cnt--; |
1334 | else |
1335 | parent->child_dwrr_cnt--; |
1336 | |
1337 | clear_bit(nr: prio, addr: parent->prio_bmap); |
1338 | out: |
1339 | return ret; |
1340 | } |
1341 | |
1342 | static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, |
1343 | u16 child_classid, u64 rate, u64 ceil, u64 prio, |
1344 | u32 quantum, struct netlink_ext_ack *extack) |
1345 | { |
1346 | struct otx2_qos_cfg *old_cfg, *new_cfg; |
1347 | struct otx2_qos_node *node, *child; |
1348 | bool static_cfg; |
1349 | int ret, err; |
1350 | u16 qid; |
1351 | |
1352 | netdev_dbg(pfvf->netdev, |
1353 | "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n" , |
1354 | classid, child_classid, rate, ceil); |
1355 | |
1356 | if (prio > OTX2_QOS_MAX_PRIO) { |
1357 | NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7" ); |
1358 | ret = -EOPNOTSUPP; |
1359 | goto out; |
1360 | } |
1361 | |
1362 | if (!quantum || quantum > INT_MAX) { |
1363 | NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes" ); |
1364 | ret = -EOPNOTSUPP; |
1365 | goto out; |
1366 | } |
1367 | |
1368 | /* find node related to classid */ |
1369 | node = otx2_sw_node_find(pfvf, classid); |
1370 | if (!node) { |
1371 | NL_SET_ERR_MSG_MOD(extack, "HTB node not found" ); |
1372 | ret = -ENOENT; |
1373 | goto out; |
1374 | } |
1375 | /* check max qos txschq level */ |
1376 | if (node->level == NIX_TXSCH_LVL_MDQ) { |
1377 | NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported" ); |
1378 | ret = -EOPNOTSUPP; |
1379 | goto out; |
1380 | } |
1381 | |
1382 | static_cfg = !is_qos_node_dwrr(parent: node, pfvf, prio); |
1383 | if (!static_cfg) { |
1384 | ret = otx2_qos_validate_dwrr_cfg(parent: node, extack, pfvf, prio, |
1385 | quantum); |
1386 | if (ret) |
1387 | goto out; |
1388 | } |
1389 | |
1390 | if (static_cfg) |
1391 | node->child_static_cnt++; |
1392 | else |
1393 | node->child_dwrr_cnt++; |
1394 | |
1395 | set_bit(nr: prio, addr: node->prio_bmap); |
1396 | |
1397 | /* store the qid to assign to leaf node */ |
1398 | qid = node->qid; |
1399 | |
1400 | /* read current txschq configuration */ |
1401 | old_cfg = kzalloc(size: sizeof(*old_cfg), GFP_KERNEL); |
1402 | if (!old_cfg) { |
1403 | NL_SET_ERR_MSG_MOD(extack, "Memory allocation error" ); |
1404 | ret = -ENOMEM; |
1405 | goto reset_prio; |
1406 | } |
1407 | otx2_qos_read_txschq_cfg(pfvf, node, cfg: old_cfg); |
1408 | |
1409 | /* delete the txschq nodes allocated for this node */ |
1410 | otx2_qos_free_sw_node_schq(pfvf, parent: node); |
1411 | |
1412 | /* mark this node as htb inner node */ |
1413 | WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); |
1414 | |
1415 | /* allocate and initialize a new child node */ |
1416 | child = otx2_qos_sw_create_leaf_node(pfvf, parent: node, classid: child_classid, |
1417 | prio, rate, ceil, quantum, |
1418 | qid, static_cfg); |
1419 | if (IS_ERR(ptr: child)) { |
1420 | NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node" ); |
1421 | ret = PTR_ERR(ptr: child); |
1422 | goto free_old_cfg; |
1423 | } |
1424 | |
1425 | /* push new txschq config to hw */ |
1426 | new_cfg = kzalloc(size: sizeof(*new_cfg), GFP_KERNEL); |
1427 | if (!new_cfg) { |
1428 | NL_SET_ERR_MSG_MOD(extack, "Memory allocation error" ); |
1429 | ret = -ENOMEM; |
1430 | goto free_node; |
1431 | } |
1432 | ret = otx2_qos_update_tree(pfvf, node: child, cfg: new_cfg); |
1433 | if (ret) { |
1434 | NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error" ); |
1435 | kfree(objp: new_cfg); |
1436 | otx2_qos_sw_node_delete(pfvf, node: child); |
1437 | /* restore the old qos tree */ |
1438 | WRITE_ONCE(node->qid, qid); |
1439 | err = otx2_qos_alloc_txschq_node(pfvf, node); |
1440 | if (err) { |
1441 | netdev_err(dev: pfvf->netdev, |
1442 | format: "Failed to restore old leaf node" ); |
1443 | goto free_old_cfg; |
1444 | } |
1445 | err = otx2_qos_txschq_update_config(pfvf, node, cfg: old_cfg); |
1446 | if (err) { |
1447 | netdev_err(dev: pfvf->netdev, |
1448 | format: "Failed to restore txcshq configuration" ); |
1449 | goto free_old_cfg; |
1450 | } |
1451 | otx2_qos_update_smq(pfvf, node, action: QOS_CFG_SQ); |
1452 | goto free_old_cfg; |
1453 | } |
1454 | |
1455 | /* free new txschq config */ |
1456 | kfree(objp: new_cfg); |
1457 | |
1458 | /* free old txschq config */ |
1459 | otx2_qos_free_cfg(pfvf, cfg: old_cfg); |
1460 | kfree(objp: old_cfg); |
1461 | |
1462 | return 0; |
1463 | |
1464 | free_node: |
1465 | otx2_qos_sw_node_delete(pfvf, node: child); |
1466 | free_old_cfg: |
1467 | kfree(objp: old_cfg); |
1468 | reset_prio: |
1469 | if (static_cfg) |
1470 | node->child_static_cnt--; |
1471 | else |
1472 | node->child_dwrr_cnt--; |
1473 | clear_bit(nr: prio, addr: node->prio_bmap); |
1474 | out: |
1475 | return ret; |
1476 | } |
1477 | |
1478 | static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, |
1479 | struct netlink_ext_ack *extack) |
1480 | { |
1481 | struct otx2_qos_node *node, *parent; |
1482 | int dwrr_del_node = false; |
1483 | u64 prio; |
1484 | u16 qid; |
1485 | |
1486 | netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n" , *classid); |
1487 | |
1488 | /* find node related to classid */ |
1489 | node = otx2_sw_node_find(pfvf, classid: *classid); |
1490 | if (!node) { |
1491 | NL_SET_ERR_MSG_MOD(extack, "HTB node not found" ); |
1492 | return -ENOENT; |
1493 | } |
1494 | parent = node->parent; |
1495 | prio = node->prio; |
1496 | qid = node->qid; |
1497 | |
1498 | if (!node->is_static) |
1499 | dwrr_del_node = true; |
1500 | |
1501 | otx2_qos_disable_sq(pfvf, qidx: node->qid); |
1502 | |
1503 | otx2_qos_destroy_node(pfvf, node); |
1504 | pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; |
1505 | |
1506 | if (dwrr_del_node) { |
1507 | parent->child_dwrr_cnt--; |
1508 | } else { |
1509 | parent->child_static_cnt--; |
1510 | clear_bit(nr: prio, addr: parent->prio_bmap); |
1511 | } |
1512 | |
1513 | /* Reset DWRR priority if all dwrr nodes are deleted */ |
1514 | if (!parent->child_dwrr_cnt) |
1515 | otx2_reset_dwrr_prio(parent, prio); |
1516 | |
1517 | if (!parent->child_static_cnt) |
1518 | parent->max_static_prio = 0; |
1519 | |
1520 | return 0; |
1521 | } |
1522 | |
1523 | static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force, |
1524 | struct netlink_ext_ack *extack) |
1525 | { |
1526 | struct otx2_qos_node *node, *parent; |
1527 | struct otx2_qos_cfg *new_cfg; |
1528 | int dwrr_del_node = false; |
1529 | u64 prio; |
1530 | int err; |
1531 | u16 qid; |
1532 | |
1533 | netdev_dbg(pfvf->netdev, |
1534 | "TC_HTB_LEAF_DEL_LAST classid %04x\n" , classid); |
1535 | |
1536 | /* find node related to classid */ |
1537 | node = otx2_sw_node_find(pfvf, classid); |
1538 | if (!node) { |
1539 | NL_SET_ERR_MSG_MOD(extack, "HTB node not found" ); |
1540 | return -ENOENT; |
1541 | } |
1542 | |
1543 | /* save qid for use by parent */ |
1544 | qid = node->qid; |
1545 | prio = node->prio; |
1546 | |
1547 | parent = otx2_sw_node_find(pfvf, classid: node->parent->classid); |
1548 | if (!parent) { |
1549 | NL_SET_ERR_MSG_MOD(extack, "parent node not found" ); |
1550 | return -ENOENT; |
1551 | } |
1552 | |
1553 | if (!node->is_static) |
1554 | dwrr_del_node = true; |
1555 | |
1556 | /* destroy the leaf node */ |
1557 | otx2_qos_destroy_node(pfvf, node); |
1558 | pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; |
1559 | |
1560 | if (dwrr_del_node) { |
1561 | parent->child_dwrr_cnt--; |
1562 | } else { |
1563 | parent->child_static_cnt--; |
1564 | clear_bit(nr: prio, addr: parent->prio_bmap); |
1565 | } |
1566 | |
1567 | /* Reset DWRR priority if all dwrr nodes are deleted */ |
1568 | if (!parent->child_dwrr_cnt) |
1569 | otx2_reset_dwrr_prio(parent, prio); |
1570 | |
1571 | if (!parent->child_static_cnt) |
1572 | parent->max_static_prio = 0; |
1573 | |
1574 | /* create downstream txschq entries to parent */ |
1575 | err = otx2_qos_alloc_txschq_node(pfvf, node: parent); |
1576 | if (err) { |
1577 | NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration" ); |
1578 | return err; |
1579 | } |
1580 | WRITE_ONCE(parent->qid, qid); |
1581 | __set_bit(qid, pfvf->qos.qos_sq_bmap); |
1582 | |
1583 | /* push new txschq config to hw */ |
1584 | new_cfg = kzalloc(size: sizeof(*new_cfg), GFP_KERNEL); |
1585 | if (!new_cfg) { |
1586 | NL_SET_ERR_MSG_MOD(extack, "Memory allocation error" ); |
1587 | return -ENOMEM; |
1588 | } |
1589 | /* fill txschq cfg and push txschq cfg to hw */ |
1590 | otx2_qos_fill_cfg_schq(parent, cfg: new_cfg); |
1591 | err = otx2_qos_push_txschq_cfg(pfvf, node: parent, cfg: new_cfg); |
1592 | if (err) { |
1593 | NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error" ); |
1594 | kfree(objp: new_cfg); |
1595 | return err; |
1596 | } |
1597 | kfree(objp: new_cfg); |
1598 | |
1599 | /* update tx_real_queues */ |
1600 | otx2_qos_update_tx_netdev_queues(pfvf); |
1601 | |
1602 | return 0; |
1603 | } |
1604 | |
1605 | void otx2_clean_qos_queues(struct otx2_nic *pfvf) |
1606 | { |
1607 | struct otx2_qos_node *root; |
1608 | |
1609 | root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID); |
1610 | if (!root) |
1611 | return; |
1612 | |
1613 | otx2_qos_update_smq(pfvf, node: root, action: QOS_SMQ_FLUSH); |
1614 | } |
1615 | |
1616 | void otx2_qos_config_txschq(struct otx2_nic *pfvf) |
1617 | { |
1618 | struct otx2_qos_node *root; |
1619 | int err; |
1620 | |
1621 | root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID); |
1622 | if (!root) |
1623 | return; |
1624 | |
1625 | if (root->level != NIX_TXSCH_LVL_TL1) { |
1626 | err = otx2_qos_txschq_config(pfvf, node: root); |
1627 | if (err) { |
1628 | netdev_err(dev: pfvf->netdev, format: "Error update txschq configuration\n" ); |
1629 | goto root_destroy; |
1630 | } |
1631 | } |
1632 | |
1633 | err = otx2_qos_txschq_push_cfg_tl(pfvf, node: root, NULL); |
1634 | if (err) { |
1635 | netdev_err(dev: pfvf->netdev, format: "Error update txschq configuration\n" ); |
1636 | goto root_destroy; |
1637 | } |
1638 | |
1639 | otx2_qos_update_smq(pfvf, node: root, action: QOS_CFG_SQ); |
1640 | return; |
1641 | |
1642 | root_destroy: |
1643 | netdev_err(dev: pfvf->netdev, format: "Failed to update Scheduler/Shaping config in Hardware\n" ); |
1644 | /* Free resources allocated */ |
1645 | otx2_qos_root_destroy(pfvf); |
1646 | } |
1647 | |
1648 | int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb) |
1649 | { |
1650 | struct otx2_nic *pfvf = netdev_priv(dev: ndev); |
1651 | int res; |
1652 | |
1653 | switch (htb->command) { |
1654 | case TC_HTB_CREATE: |
1655 | return otx2_qos_root_add(pfvf, htb_maj_id: htb->parent_classid, |
1656 | htb_defcls: htb->classid, extack: htb->extack); |
1657 | case TC_HTB_DESTROY: |
1658 | return otx2_qos_root_destroy(pfvf); |
1659 | case TC_HTB_LEAF_ALLOC_QUEUE: |
1660 | res = otx2_qos_leaf_alloc_queue(pfvf, classid: htb->classid, |
1661 | parent_classid: htb->parent_classid, |
1662 | rate: htb->rate, ceil: htb->ceil, |
1663 | prio: htb->prio, quantum: htb->quantum, |
1664 | extack: htb->extack); |
1665 | if (res < 0) |
1666 | return res; |
1667 | htb->qid = res; |
1668 | return 0; |
1669 | case TC_HTB_LEAF_TO_INNER: |
1670 | return otx2_qos_leaf_to_inner(pfvf, classid: htb->parent_classid, |
1671 | child_classid: htb->classid, rate: htb->rate, |
1672 | ceil: htb->ceil, prio: htb->prio, |
1673 | quantum: htb->quantum, extack: htb->extack); |
1674 | case TC_HTB_LEAF_DEL: |
1675 | return otx2_qos_leaf_del(pfvf, classid: &htb->classid, extack: htb->extack); |
1676 | case TC_HTB_LEAF_DEL_LAST: |
1677 | case TC_HTB_LEAF_DEL_LAST_FORCE: |
1678 | return otx2_qos_leaf_del_last(pfvf, classid: htb->classid, |
1679 | force: htb->command == TC_HTB_LEAF_DEL_LAST_FORCE, |
1680 | extack: htb->extack); |
1681 | case TC_HTB_LEAF_QUERY_QUEUE: |
1682 | res = otx2_get_txq_by_classid(pfvf, classid: htb->classid); |
1683 | htb->qid = res; |
1684 | return 0; |
1685 | case TC_HTB_NODE_MODIFY: |
1686 | fallthrough; |
1687 | default: |
1688 | return -EOPNOTSUPP; |
1689 | } |
1690 | } |
1691 | |