1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 |
2 | /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ |
3 | |
4 | #include <linux/kernel.h> |
5 | #include <linux/types.h> |
6 | #include <linux/dcbnl.h> |
7 | #include <linux/if_ether.h> |
8 | #include <linux/list.h> |
9 | #include <linux/netlink.h> |
10 | |
11 | #include "spectrum.h" |
12 | #include "core.h" |
13 | #include "port.h" |
14 | #include "reg.h" |
15 | |
/* Driver-side shadow of a shared-buffer pool (SBPR register) configuration:
 * threshold mode and pool size. Sizes are kept in cells (see
 * mlxsw_sp_sb_pr_write(), which caches the written values here).
 */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
	/* NOTE(review): freeze bits are not referenced in this chunk --
	 * presumably they forbid runtime changes to mode/size via the
	 * devlink sb API; confirm against the rest of the file.
	 */
	u8 freeze_mode:1,
	   freeze_size:1;
};
22 | |
/* Occupancy snapshot: current buffer occupancy and maximum watermark,
 * as unpacked from SBPM in mlxsw_sp_sb_pm_occ_query_cb().
 */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
27 | |
/* Driver-side shadow of a per-{port, PG/TC} quota (SBCM register):
 * minimum and maximum buffer usage, backing pool index and occupancy.
 * Cached by mlxsw_sp_sb_cm_write().
 */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
	struct mlxsw_cp_sb_occ occ;
	/* NOTE(review): freeze bits unused in this chunk -- presumably they
	 * pin pool binding / threshold against devlink changes; confirm.
	 */
	u8 freeze_pool:1,
	   freeze_thresh:1;
};
36 | |
/* Sentinel pool sizes: MLXSW_SP_SB_INFI means "infinite" (written with
 * infi_size and cached as the whole shared buffer in cells);
 * MLXSW_SP_SB_REST means "whatever remains" after all explicitly sized
 * pools of the same direction are accounted for (see mlxsw_sp_sb_prs_init()).
 */
#define MLXSW_SP_SB_INFI -1U
#define MLXSW_SP_SB_REST -2U
39 | |
/* Driver-side shadow of a per-{port, pool} quota (SBPM register),
 * cached by mlxsw_sp_sb_pm_write(), plus its occupancy readout.
 */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};
45 | |
/* Per-priority multicast quota description (consumed via sb_vals->mms).
 * NOTE(review): the writer of this table is outside this chunk --
 * presumably it is programmed via the SBMM register; confirm.
 */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
};
51 | |
/* Pool descriptor: maps a flat driver pool index to the hardware's
 * (direction, pool number) pair used by the SBXX registers.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};
56 | |
/* Indices into the mlxsw_sp*_sb_pool_dess[] arrays below: first ingress
 * pool, first egress pool, egress multicast pool, and the per-direction
 * CPU traffic pools.
 */
#define MLXSW_SP_SB_POOL_ING		0
#define MLXSW_SP_SB_POOL_EGR		4
#define MLXSW_SP_SB_POOL_EGR_MC		8
#define MLXSW_SP_SB_POOL_ING_CPU	9
#define MLXSW_SP_SB_POOL_EGR_CPU	10
62 | |
/* Spectrum-1 pool descriptors: four ingress and four egress byte pools,
 * egress pool 15 (multicast, per MLXSW_SP_SB_POOL_EGR_MC), and one CPU
 * pool per direction. Order must match the MLXSW_SP_SB_POOL_* indices.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
76 | |
/* Spectrum-2 pool descriptors; same layout as the Spectrum-1 table.
 * Order must match the MLXSW_SP_SB_POOL_* indices.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
90 | |
/* Number of priority groups / traffic classes per direction; these bound
 * the per-port CM arrays below (see mlxsw_sp_sb_cm_exists()).
 */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shared-buffer state: CM shadows for both directions and a
 * dynamically allocated PM shadow per pool (see mlxsw_sp_sb_port_init()).
 */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;
};
99 | |
/* Top-level shared-buffer state: pool shadows, per-port state, the
 * device cell size in bytes, the headroom budget in cells, and the
 * total shared-buffer size in bytes.
 */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
	u32 max_headroom_cells;
	u64 sb_size;
};
107 | |
/* Per-ASIC default shared-buffer configuration: pool descriptors and the
 * default PR/PM/CM/MM tables with their lengths. Selected per device
 * generation (Spectrum-1 vs Spectrum-2 tables above/below).
 */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;
	const struct mlxsw_sp_sb_pm *pms_cpu;
	const struct mlxsw_sp_sb_pr *prs;
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
123 | |
/* Per-ASIC callbacks; int_buf_size_get() returns the internal (port)
 * buffer size in bytes for a given MTU and speed (see
 * mlxsw_sp_hdroom_int_buf_size_get()).
 */
struct mlxsw_sp_sb_ops {
	u32 (*int_buf_size_get)(int mtu, u32 speed);
};
127 | |
128 | u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells) |
129 | { |
130 | return mlxsw_sp->sb->cell_size * cells; |
131 | } |
132 | |
133 | u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes) |
134 | { |
135 | return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size); |
136 | } |
137 | |
138 | static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port, |
139 | u32 size_cells) |
140 | { |
141 | /* Ports with eight lanes use two headroom buffers between which the |
142 | * configured headroom size is split. Therefore, multiply the calculated |
143 | * headroom size by two. |
144 | */ |
145 | return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells; |
146 | } |
147 | |
148 | static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp, |
149 | u16 pool_index) |
150 | { |
151 | return &mlxsw_sp->sb->prs[pool_index]; |
152 | } |
153 | |
154 | static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir) |
155 | { |
156 | if (dir == MLXSW_REG_SBXX_DIR_INGRESS) |
157 | return pg_buff < MLXSW_SP_SB_ING_TC_COUNT; |
158 | else |
159 | return pg_buff < MLXSW_SP_SB_EG_TC_COUNT; |
160 | } |
161 | |
162 | static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, |
163 | u16 local_port, u8 pg_buff, |
164 | enum mlxsw_reg_sbxx_dir dir) |
165 | { |
166 | struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port]; |
167 | |
168 | WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir)); |
169 | if (dir == MLXSW_REG_SBXX_DIR_INGRESS) |
170 | return &sb_port->ing_cms[pg_buff]; |
171 | else |
172 | return &sb_port->eg_cms[pg_buff]; |
173 | } |
174 | |
175 | static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp, |
176 | u16 local_port, u16 pool_index) |
177 | { |
178 | return &mlxsw_sp->sb->ports[local_port].pms[pool_index]; |
179 | } |
180 | |
181 | static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index, |
182 | enum mlxsw_reg_sbpr_mode mode, |
183 | u32 size, bool infi_size) |
184 | { |
185 | const struct mlxsw_sp_sb_pool_des *des = |
186 | &mlxsw_sp->sb_vals->pool_dess[pool_index]; |
187 | char sbpr_pl[MLXSW_REG_SBPR_LEN]; |
188 | struct mlxsw_sp_sb_pr *pr; |
189 | int err; |
190 | |
191 | mlxsw_reg_sbpr_pack(payload: sbpr_pl, pool: des->pool, dir: des->dir, mode, |
192 | size, infi_size); |
193 | err = mlxsw_reg_write(mlxsw_core: mlxsw_sp->core, MLXSW_REG(sbpr), payload: sbpr_pl); |
194 | if (err) |
195 | return err; |
196 | |
197 | if (infi_size) |
198 | size = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: mlxsw_sp->sb->sb_size); |
199 | pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); |
200 | pr->mode = mode; |
201 | pr->size = size; |
202 | return 0; |
203 | } |
204 | |
205 | static int mlxsw_sp_sb_pr_desc_write(struct mlxsw_sp *mlxsw_sp, |
206 | enum mlxsw_reg_sbxx_dir dir, |
207 | enum mlxsw_reg_sbpr_mode mode, |
208 | u32 size, bool infi_size) |
209 | { |
210 | char sbpr_pl[MLXSW_REG_SBPR_LEN]; |
211 | |
212 | /* The FW default descriptor buffer configuration uses only pool 14 for |
213 | * descriptors. |
214 | */ |
215 | mlxsw_reg_sbpr_pack(payload: sbpr_pl, pool: 14, dir, mode, size, infi_size); |
216 | mlxsw_reg_sbpr_desc_set(buf: sbpr_pl, val: true); |
217 | return mlxsw_reg_write(mlxsw_core: mlxsw_sp->core, MLXSW_REG(sbpr), payload: sbpr_pl); |
218 | } |
219 | |
220 | static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port, |
221 | u8 pg_buff, u32 min_buff, u32 max_buff, |
222 | bool infi_max, u16 pool_index) |
223 | { |
224 | const struct mlxsw_sp_sb_pool_des *des = |
225 | &mlxsw_sp->sb_vals->pool_dess[pool_index]; |
226 | char sbcm_pl[MLXSW_REG_SBCM_LEN]; |
227 | struct mlxsw_sp_sb_cm *cm; |
228 | int err; |
229 | |
230 | mlxsw_reg_sbcm_pack(payload: sbcm_pl, local_port, pg_buff, dir: des->dir, |
231 | min_buff, max_buff, infi_max, pool: des->pool); |
232 | err = mlxsw_reg_write(mlxsw_core: mlxsw_sp->core, MLXSW_REG(sbcm), payload: sbcm_pl); |
233 | if (err) |
234 | return err; |
235 | |
236 | if (mlxsw_sp_sb_cm_exists(pg_buff, dir: des->dir)) { |
237 | if (infi_max) |
238 | max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, |
239 | bytes: mlxsw_sp->sb->sb_size); |
240 | |
241 | cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, |
242 | dir: des->dir); |
243 | cm->min_buff = min_buff; |
244 | cm->max_buff = max_buff; |
245 | cm->pool_index = pool_index; |
246 | } |
247 | return 0; |
248 | } |
249 | |
250 | static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port, |
251 | u16 pool_index, u32 min_buff, u32 max_buff) |
252 | { |
253 | const struct mlxsw_sp_sb_pool_des *des = |
254 | &mlxsw_sp->sb_vals->pool_dess[pool_index]; |
255 | char sbpm_pl[MLXSW_REG_SBPM_LEN]; |
256 | struct mlxsw_sp_sb_pm *pm; |
257 | int err; |
258 | |
259 | mlxsw_reg_sbpm_pack(payload: sbpm_pl, local_port, pool: des->pool, dir: des->dir, clr: false, |
260 | min_buff, max_buff); |
261 | err = mlxsw_reg_write(mlxsw_core: mlxsw_sp->core, MLXSW_REG(sbpm), payload: sbpm_pl); |
262 | if (err) |
263 | return err; |
264 | |
265 | pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); |
266 | pm->min_buff = min_buff; |
267 | pm->max_buff = max_buff; |
268 | return 0; |
269 | } |
270 | |
271 | static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u16 local_port, |
272 | u16 pool_index, struct list_head *bulk_list) |
273 | { |
274 | const struct mlxsw_sp_sb_pool_des *des = |
275 | &mlxsw_sp->sb_vals->pool_dess[pool_index]; |
276 | char sbpm_pl[MLXSW_REG_SBPM_LEN]; |
277 | |
278 | if (local_port == MLXSW_PORT_CPU_PORT && |
279 | des->dir == MLXSW_REG_SBXX_DIR_INGRESS) |
280 | return 0; |
281 | |
282 | mlxsw_reg_sbpm_pack(payload: sbpm_pl, local_port, pool: des->pool, dir: des->dir, |
283 | clr: true, min_buff: 0, max_buff: 0); |
284 | return mlxsw_reg_trans_query(mlxsw_core: mlxsw_sp->core, MLXSW_REG(sbpm), payload: sbpm_pl, |
285 | bulk_list, NULL, cb_priv: 0); |
286 | } |
287 | |
288 | static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core, |
289 | char *sbpm_pl, size_t sbpm_pl_len, |
290 | unsigned long cb_priv) |
291 | { |
292 | struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv; |
293 | |
294 | mlxsw_reg_sbpm_unpack(payload: sbpm_pl, p_buff_occupancy: &pm->occ.cur, p_max_buff_occupancy: &pm->occ.max); |
295 | } |
296 | |
297 | static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u16 local_port, |
298 | u16 pool_index, struct list_head *bulk_list) |
299 | { |
300 | const struct mlxsw_sp_sb_pool_des *des = |
301 | &mlxsw_sp->sb_vals->pool_dess[pool_index]; |
302 | char sbpm_pl[MLXSW_REG_SBPM_LEN]; |
303 | struct mlxsw_sp_sb_pm *pm; |
304 | |
305 | if (local_port == MLXSW_PORT_CPU_PORT && |
306 | des->dir == MLXSW_REG_SBXX_DIR_INGRESS) |
307 | return 0; |
308 | |
309 | pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); |
310 | mlxsw_reg_sbpm_pack(payload: sbpm_pl, local_port, pool: des->pool, dir: des->dir, |
311 | clr: false, min_buff: 0, max_buff: 0); |
312 | return mlxsw_reg_trans_query(mlxsw_core: mlxsw_sp->core, MLXSW_REG(sbpm), payload: sbpm_pl, |
313 | bulk_list, |
314 | cb: mlxsw_sp_sb_pm_occ_query_cb, |
315 | cb_priv: (unsigned long) pm); |
316 | } |
317 | |
318 | void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom) |
319 | { |
320 | int prio; |
321 | |
322 | for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { |
323 | switch (hdroom->mode) { |
324 | case MLXSW_SP_HDROOM_MODE_DCB: |
325 | hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx; |
326 | break; |
327 | case MLXSW_SP_HDROOM_MODE_TC: |
328 | hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].set_buf_idx; |
329 | break; |
330 | } |
331 | } |
332 | } |
333 | |
334 | void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom) |
335 | { |
336 | int prio; |
337 | int i; |
338 | |
339 | for (i = 0; i < DCBX_MAX_BUFFERS; i++) |
340 | hdroom->bufs.buf[i].lossy = true; |
341 | |
342 | for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) { |
343 | if (!hdroom->prios.prio[prio].lossy) |
344 | hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false; |
345 | } |
346 | } |
347 | |
348 | static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu) |
349 | { |
350 | return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, bytes: mtu); |
351 | } |
352 | |
353 | static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy) |
354 | { |
355 | if (lossy) |
356 | mlxsw_reg_pbmc_lossy_buffer_pack(payload: pbmc_pl, buf_index: index, size); |
357 | else |
358 | mlxsw_reg_pbmc_lossless_buffer_pack(payload: pbmc_pl, buf_index: index, size, |
359 | threshold: thres); |
360 | } |
361 | |
362 | static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, |
363 | const struct mlxsw_sp_hdroom *hdroom) |
364 | { |
365 | u16 delay_cells; |
366 | |
367 | delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: hdroom->delay_bytes); |
368 | |
369 | /* In the worst case scenario the delay will be made up of packets that |
370 | * are all of size CELL_SIZE + 1, which means each packet will require |
371 | * almost twice its true size when buffered in the switch. We therefore |
372 | * multiply this value by the "cell factor", which is close to 2. |
373 | * |
374 | * Another MTU is added in case the transmitting host already started |
375 | * transmitting a maximum length frame when the PFC packet was received. |
376 | */ |
377 | return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, bytes: hdroom->mtu); |
378 | } |
379 | |
380 | static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) |
381 | { |
382 | u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(mtu, speed); |
383 | |
384 | return mlxsw_sp_bytes_cells(mlxsw_sp, bytes: buffsize) + 1; |
385 | } |
386 | |
387 | static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf) |
388 | { |
389 | int prio; |
390 | |
391 | for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { |
392 | if (hdroom->prios.prio[prio].buf_idx == buf) |
393 | return true; |
394 | } |
395 | return false; |
396 | } |
397 | |
398 | void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port, |
399 | struct mlxsw_sp_hdroom *hdroom) |
400 | { |
401 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
402 | u16 reserve_cells; |
403 | int i; |
404 | |
405 | /* Internal buffer. */ |
406 | reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mtu: mlxsw_sp_port->max_mtu, |
407 | speed: mlxsw_sp_port->max_speed); |
408 | reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size_cells: reserve_cells); |
409 | hdroom->int_buf.reserve_cells = reserve_cells; |
410 | |
411 | if (hdroom->int_buf.enable) |
412 | hdroom->int_buf.size_cells = reserve_cells; |
413 | else |
414 | hdroom->int_buf.size_cells = 0; |
415 | |
416 | /* PG buffers. */ |
417 | for (i = 0; i < DCBX_MAX_BUFFERS; i++) { |
418 | struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i]; |
419 | u16 thres_cells; |
420 | u16 delay_cells; |
421 | |
422 | if (!mlxsw_sp_hdroom_buf_is_used(hdroom, buf: i)) { |
423 | thres_cells = 0; |
424 | delay_cells = 0; |
425 | } else if (buf->lossy) { |
426 | thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, mtu: hdroom->mtu); |
427 | delay_cells = 0; |
428 | } else { |
429 | thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, mtu: hdroom->mtu); |
430 | delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom); |
431 | } |
432 | |
433 | thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size_cells: thres_cells); |
434 | delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size_cells: delay_cells); |
435 | |
436 | buf->thres_cells = thres_cells; |
437 | if (hdroom->mode == MLXSW_SP_HDROOM_MODE_DCB) { |
438 | buf->size_cells = thres_cells + delay_cells; |
439 | } else { |
440 | /* Do not allow going below the minimum size, even if |
441 | * the user requested it. |
442 | */ |
443 | buf->size_cells = max(buf->set_size_cells, buf->thres_cells); |
444 | } |
445 | } |
446 | } |
447 | |
448 | #define MLXSW_SP_PB_UNUSED 8 |
449 | |
450 | static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port, |
451 | const struct mlxsw_sp_hdroom *hdroom, bool force) |
452 | { |
453 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
454 | char pbmc_pl[MLXSW_REG_PBMC_LEN]; |
455 | bool dirty; |
456 | int err; |
457 | int i; |
458 | |
459 | dirty = memcmp(p: &mlxsw_sp_port->hdroom->bufs, q: &hdroom->bufs, size: sizeof(hdroom->bufs)); |
460 | if (!dirty && !force) |
461 | return 0; |
462 | |
463 | mlxsw_reg_pbmc_pack(payload: pbmc_pl, local_port: mlxsw_sp_port->local_port, xoff_timer_value: 0xffff, xoff_refresh: 0xffff / 2); |
464 | for (i = 0; i < MLXSW_SP_PB_COUNT; i++) { |
465 | const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i]; |
466 | |
467 | if (i == MLXSW_SP_PB_UNUSED) |
468 | continue; |
469 | |
470 | mlxsw_sp_hdroom_buf_pack(pbmc_pl, index: i, size: buf->size_cells, thres: buf->thres_cells, lossy: buf->lossy); |
471 | } |
472 | |
473 | mlxsw_reg_pbmc_lossy_buffer_pack(payload: pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, size: 0); |
474 | err = mlxsw_reg_write(mlxsw_core: mlxsw_sp->core, MLXSW_REG(pbmc), payload: pbmc_pl); |
475 | if (err) |
476 | return err; |
477 | |
478 | mlxsw_sp_port->hdroom->bufs = hdroom->bufs; |
479 | return 0; |
480 | } |
481 | |
482 | static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port, |
483 | const struct mlxsw_sp_hdroom *hdroom, bool force) |
484 | { |
485 | char pptb_pl[MLXSW_REG_PPTB_LEN]; |
486 | bool dirty; |
487 | int prio; |
488 | int err; |
489 | |
490 | dirty = memcmp(p: &mlxsw_sp_port->hdroom->prios, q: &hdroom->prios, size: sizeof(hdroom->prios)); |
491 | if (!dirty && !force) |
492 | return 0; |
493 | |
494 | mlxsw_reg_pptb_pack(payload: pptb_pl, local_port: mlxsw_sp_port->local_port); |
495 | for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) |
496 | mlxsw_reg_pptb_prio_to_buff_pack(payload: pptb_pl, prio, buff: hdroom->prios.prio[prio].buf_idx); |
497 | |
498 | err = mlxsw_reg_write(mlxsw_core: mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), payload: pptb_pl); |
499 | if (err) |
500 | return err; |
501 | |
502 | mlxsw_sp_port->hdroom->prios = hdroom->prios; |
503 | return 0; |
504 | } |
505 | |
506 | static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port, |
507 | const struct mlxsw_sp_hdroom *hdroom, bool force) |
508 | { |
509 | char sbib_pl[MLXSW_REG_SBIB_LEN]; |
510 | bool dirty; |
511 | int err; |
512 | |
513 | dirty = memcmp(p: &mlxsw_sp_port->hdroom->int_buf, q: &hdroom->int_buf, size: sizeof(hdroom->int_buf)); |
514 | if (!dirty && !force) |
515 | return 0; |
516 | |
517 | mlxsw_reg_sbib_pack(payload: sbib_pl, local_port: mlxsw_sp_port->local_port, buff_size: hdroom->int_buf.size_cells); |
518 | err = mlxsw_reg_write(mlxsw_core: mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), payload: sbib_pl); |
519 | if (err) |
520 | return err; |
521 | |
522 | mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf; |
523 | return 0; |
524 | } |
525 | |
526 | static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp, |
527 | const struct mlxsw_sp_hdroom *hdroom) |
528 | { |
529 | u32 taken_headroom_cells = 0; |
530 | int i; |
531 | |
532 | for (i = 0; i < MLXSW_SP_PB_COUNT; i++) |
533 | taken_headroom_cells += hdroom->bufs.buf[i].size_cells; |
534 | |
535 | taken_headroom_cells += hdroom->int_buf.reserve_cells; |
536 | return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells; |
537 | } |
538 | |
539 | static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port, |
540 | const struct mlxsw_sp_hdroom *hdroom, bool force) |
541 | { |
542 | struct mlxsw_sp_hdroom orig_hdroom; |
543 | struct mlxsw_sp_hdroom tmp_hdroom; |
544 | int err; |
545 | int i; |
546 | |
547 | /* Port buffers need to be configured in three steps. First, all buffers |
548 | * with non-zero size are configured. Then, prio-to-buffer map is |
549 | * updated, allowing traffic to flow to the now non-zero buffers. |
550 | * Finally, zero-sized buffers are configured, because now no traffic |
551 | * should be directed to them anymore. This way, in a non-congested |
552 | * system, no packet drops are introduced by the reconfiguration. |
553 | */ |
554 | |
555 | orig_hdroom = *mlxsw_sp_port->hdroom; |
556 | tmp_hdroom = orig_hdroom; |
557 | for (i = 0; i < MLXSW_SP_PB_COUNT; i++) { |
558 | if (hdroom->bufs.buf[i].size_cells) |
559 | tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i]; |
560 | } |
561 | |
562 | if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp: mlxsw_sp_port->mlxsw_sp, hdroom: &tmp_hdroom) || |
563 | !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp: mlxsw_sp_port->mlxsw_sp, hdroom)) |
564 | return -ENOBUFS; |
565 | |
566 | err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom: &tmp_hdroom, force); |
567 | if (err) |
568 | return err; |
569 | |
570 | err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force); |
571 | if (err) |
572 | goto err_configure_priomap; |
573 | |
574 | err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, force: false); |
575 | if (err) |
576 | goto err_configure_buffers; |
577 | |
578 | err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, force: false); |
579 | if (err) |
580 | goto err_configure_int_buf; |
581 | |
582 | *mlxsw_sp_port->hdroom = *hdroom; |
583 | return 0; |
584 | |
585 | err_configure_int_buf: |
586 | mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom: &tmp_hdroom, force: false); |
587 | err_configure_buffers: |
588 | mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom: &tmp_hdroom, force: false); |
589 | err_configure_priomap: |
590 | mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom: &orig_hdroom, force: false); |
591 | return err; |
592 | } |
593 | |
594 | int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port, |
595 | const struct mlxsw_sp_hdroom *hdroom) |
596 | { |
597 | return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, force: false); |
598 | } |
599 | |
600 | static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) |
601 | { |
602 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
603 | struct mlxsw_sp_hdroom hdroom = {}; |
604 | u32 size9; |
605 | int prio; |
606 | |
607 | hdroom.mtu = mlxsw_sp_port->dev->mtu; |
608 | hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB; |
609 | for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) |
610 | hdroom.prios.prio[prio].lossy = true; |
611 | |
612 | mlxsw_sp_hdroom_bufs_reset_lossiness(hdroom: &hdroom); |
613 | mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, hdroom: &hdroom); |
614 | |
615 | /* Buffer 9 is used for control traffic. */ |
616 | size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size_cells: mlxsw_sp_port->max_mtu); |
617 | hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: size9); |
618 | |
619 | return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom: &hdroom, force: true); |
620 | } |
621 | |
622 | static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp, |
623 | struct mlxsw_sp_sb_port *sb_port) |
624 | { |
625 | struct mlxsw_sp_sb_pm *pms; |
626 | |
627 | pms = kcalloc(n: mlxsw_sp->sb_vals->pool_count, size: sizeof(*pms), |
628 | GFP_KERNEL); |
629 | if (!pms) |
630 | return -ENOMEM; |
631 | sb_port->pms = pms; |
632 | return 0; |
633 | } |
634 | |
635 | static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port) |
636 | { |
637 | kfree(objp: sb_port->pms); |
638 | } |
639 | |
640 | static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp) |
641 | { |
642 | unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core: mlxsw_sp->core); |
643 | struct mlxsw_sp_sb_pr *prs; |
644 | int i; |
645 | int err; |
646 | |
647 | mlxsw_sp->sb->ports = kcalloc(n: max_ports, |
648 | size: sizeof(struct mlxsw_sp_sb_port), |
649 | GFP_KERNEL); |
650 | if (!mlxsw_sp->sb->ports) |
651 | return -ENOMEM; |
652 | |
653 | prs = kcalloc(n: mlxsw_sp->sb_vals->pool_count, size: sizeof(*prs), |
654 | GFP_KERNEL); |
655 | if (!prs) { |
656 | err = -ENOMEM; |
657 | goto err_alloc_prs; |
658 | } |
659 | mlxsw_sp->sb->prs = prs; |
660 | |
661 | for (i = 0; i < max_ports; i++) { |
662 | err = mlxsw_sp_sb_port_init(mlxsw_sp, sb_port: &mlxsw_sp->sb->ports[i]); |
663 | if (err) |
664 | goto err_sb_port_init; |
665 | } |
666 | |
667 | return 0; |
668 | |
669 | err_sb_port_init: |
670 | for (i--; i >= 0; i--) |
671 | mlxsw_sp_sb_port_fini(sb_port: &mlxsw_sp->sb->ports[i]); |
672 | kfree(objp: mlxsw_sp->sb->prs); |
673 | err_alloc_prs: |
674 | kfree(objp: mlxsw_sp->sb->ports); |
675 | return err; |
676 | } |
677 | |
678 | static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp) |
679 | { |
680 | int max_ports = mlxsw_core_max_ports(mlxsw_core: mlxsw_sp->core); |
681 | int i; |
682 | |
683 | for (i = max_ports - 1; i >= 0; i--) |
684 | mlxsw_sp_sb_port_fini(sb_port: &mlxsw_sp->sb->ports[i]); |
685 | kfree(objp: mlxsw_sp->sb->prs); |
686 | kfree(objp: mlxsw_sp->sb->ports); |
687 | } |
688 | |
/* Initializer for a default pool entry (freeze flags left clear). */
#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

/* Initializer for a pool entry with explicit freeze flags. */
#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)	\
	{								\
		.mode = _mode,						\
		.size = _size,						\
		.freeze_mode = _freeze_mode,				\
		.freeze_size = _freeze_size,				\
	}
702 | |
/* Byte size of each Spectrum-1 CPU traffic pool. */
#define MLXSW_SP1_SB_PR_CPU_SIZE	(256 * 1000)

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	/* Egress multicast pool: static, "infinite" size. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	/* CPU pools, one per direction. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
723 | |
/* Byte size of each Spectrum-2 CPU traffic pool. */
#define MLXSW_SP2_SB_PR_CPU_SIZE	(256 * 1000)

/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	/* Egress multicast pool: static, "infinite" size. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	/* CPU pools, one per direction. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
744 | |
745 | static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, |
746 | const struct mlxsw_sp_sb_pr *prs, |
747 | const struct mlxsw_sp_sb_pool_des *pool_dess, |
748 | size_t prs_len) |
749 | { |
750 | /* Round down, unlike mlxsw_sp_bytes_cells(). */ |
751 | u32 sb_cells = div_u64(dividend: mlxsw_sp->sb->sb_size, divisor: mlxsw_sp->sb->cell_size); |
752 | u32 rest_cells[2] = {sb_cells, sb_cells}; |
753 | int i; |
754 | int err; |
755 | |
756 | /* Calculate how much space to give to the "REST" pools in either |
757 | * direction. |
758 | */ |
759 | for (i = 0; i < prs_len; i++) { |
760 | enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir; |
761 | u32 size = prs[i].size; |
762 | u32 size_cells; |
763 | |
764 | if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST) |
765 | continue; |
766 | |
767 | size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: size); |
768 | if (WARN_ON_ONCE(size_cells > rest_cells[dir])) |
769 | continue; |
770 | |
771 | rest_cells[dir] -= size_cells; |
772 | } |
773 | |
774 | for (i = 0; i < prs_len; i++) { |
775 | u32 size = prs[i].size; |
776 | u32 size_cells; |
777 | |
778 | if (size == MLXSW_SP_SB_INFI) { |
779 | err = mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index: i, mode: prs[i].mode, |
780 | size: 0, infi_size: true); |
781 | } else if (size == MLXSW_SP_SB_REST) { |
782 | size_cells = rest_cells[pool_dess[i].dir]; |
783 | err = mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index: i, mode: prs[i].mode, |
784 | size: size_cells, infi_size: false); |
785 | } else { |
786 | size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: size); |
787 | err = mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index: i, mode: prs[i].mode, |
788 | size: size_cells, infi_size: false); |
789 | } |
790 | if (err) |
791 | return err; |
792 | } |
793 | |
794 | err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, dir: MLXSW_REG_SBXX_DIR_INGRESS, |
795 | mode: MLXSW_REG_SBPR_MODE_DYNAMIC, size: 0, infi_size: true); |
796 | if (err) |
797 | return err; |
798 | |
799 | err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, dir: MLXSW_REG_SBXX_DIR_EGRESS, |
800 | mode: MLXSW_REG_SBPR_MODE_DYNAMIC, size: 0, infi_size: true); |
801 | if (err) |
802 | return err; |
803 | |
804 | return 0; |
805 | } |
806 | |
/* Initializer for a CM entry bound to an arbitrary pool. */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

/* Initializer for a CM entry bound to the first ingress pool. */
#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_ING,	\
	}

/* Initializer for a CM entry bound to the first egress pool. */
#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

/* Initializer for a CM entry bound to the egress multicast pool; both
 * the pool binding and the threshold are frozen.
 */
#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR_MC,	\
		.freeze_pool = true,			\
		.freeze_thresh = true,			\
	}
836 | |
/* Spectrum-1 default ingress quotas, one entry per PG; the last entry
 * is the CPU-bound PG backed by the ingress CPU pool.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(10000, 8),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
849 | |
/* Spectrum-2 default ingress quotas; same layout as the Spectrum-1
 * table, with Spectrum-2 threshold values.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(0, 7),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
862 | |
/* Spectrum-1 default egress quotas: eight unicast TCs, eight multicast
 * TCs on the frozen MC pool with "infinite" maximum, and a final
 * control entry.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
882 | |
/* Default egress CM configuration for Spectrum-2; same layout as the
 * Spectrum-1 table, with zero minimum for the unicast TCs.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
902 | |
/* Zero-quota CM template for CPU port traffic classes that need no
 * dedicated buffering; bound to the CPU egress pool.
 */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)

/* Egress CM configuration for the CPU port, indexed by traffic class.
 * Selected TCs get a real quota; the rest use the zero template above.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};
939 | |
940 | static bool |
941 | mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index) |
942 | { |
943 | struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); |
944 | |
945 | return pr->mode == MLXSW_REG_SBPR_MODE_STATIC; |
946 | } |
947 | |
/* Program the per-{port, PG/TC} CM (pool binding and quota) registers for
 * one direction of @local_port using the per-ASIC defaults in @cms.
 * Returns 0, or a negative errno from the first failed register write.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		/* A CM must reference a pool of its own direction. */
		if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			/* Infinite quota is expressed via the register's
			 * infi_max bit, not a concrete cell count.
			 */
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff: i,
						   min_buff, max_buff: 0,
						   infi_max: true, pool_index: cm->pool_index);
		} else {
			/* Static pools take max_buff in cells; dynamic
			 * pools take the value as-is (alpha encoding).
			 */
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       pool_index: cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								bytes: max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff: i,
						   min_buff, max_buff,
						   infi_max: false, pool_index: cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}
988 | |
989 | static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port) |
990 | { |
991 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
992 | int err; |
993 | |
994 | err = __mlxsw_sp_sb_cms_init(mlxsw_sp, |
995 | local_port: mlxsw_sp_port->local_port, |
996 | dir: MLXSW_REG_SBXX_DIR_INGRESS, |
997 | cms: mlxsw_sp->sb_vals->cms_ingress, |
998 | cms_len: mlxsw_sp->sb_vals->cms_ingress_count); |
999 | if (err) |
1000 | return err; |
1001 | return __mlxsw_sp_sb_cms_init(mlxsw_sp: mlxsw_sp_port->mlxsw_sp, |
1002 | local_port: mlxsw_sp_port->local_port, |
1003 | dir: MLXSW_REG_SBXX_DIR_EGRESS, |
1004 | cms: mlxsw_sp->sb_vals->cms_egress, |
1005 | cms_len: mlxsw_sp->sb_vals->cms_egress_count); |
1006 | } |
1007 | |
1008 | static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) |
1009 | { |
1010 | return __mlxsw_sp_sb_cms_init(mlxsw_sp, local_port: 0, dir: MLXSW_REG_SBXX_DIR_EGRESS, |
1011 | cms: mlxsw_sp->sb_vals->cms_cpu, |
1012 | cms_len: mlxsw_sp->sb_vals->cms_cpu_count); |
1013 | } |
1014 | |
/* Per-{port, pool} quota (PM) initializer. */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
	{ \
		.min_buff = _min_buff, \
		.max_buff = _max_buff, \
	}

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
1035 | |
/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
1050 | |
/* Order according to mlxsw_sp*_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 90000),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
1065 | |
/* Program the per-{port, pool} quota (PM) registers of @local_port from
 * the table @pms, which is indexed like the pool descriptors. Ingress
 * pools may be skipped via @skip_ingress (used for the CPU port).
 * Returns 0, or a negative errno from the first failed register write.
 */
static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				const struct mlxsw_sp_sb_pm *pms,
				bool skip_ingress)
{
	int i, err;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
		const struct mlxsw_sp_sb_pm *pm = &pms[i];
		const struct mlxsw_sp_sb_pool_des *des;
		u32 max_buff;
		u32 min_buff;

		des = &mlxsw_sp->sb_vals->pool_dess[i];
		if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue;

		/* Static pools take max_buff in cells; dynamic pools take
		 * the value as-is (alpha encoding).
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, pool_index: i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index: i, min_buff,
					   max_buff);
		if (err)
			return err;
	}
	return 0;
}
1093 | |
1094 | static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) |
1095 | { |
1096 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
1097 | |
1098 | return mlxsw_sp_sb_pms_init(mlxsw_sp, local_port: mlxsw_sp_port->local_port, |
1099 | pms: mlxsw_sp->sb_vals->pms, skip_ingress: false); |
1100 | } |
1101 | |
1102 | static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp) |
1103 | { |
1104 | return mlxsw_sp_sb_pms_init(mlxsw_sp, local_port: 0, pms: mlxsw_sp->sb_vals->pms_cpu, |
1105 | skip_ingress: true); |
1106 | } |
1107 | |
/* Per-priority MM initializer; all entries bind to the egress pool. */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff) \
	{ \
		.min_buff = _min_buff, \
		.max_buff = _max_buff, \
		.pool_index = MLXSW_SP_SB_POOL_EGR, \
	}

/* Default MM configuration, indexed by switch priority. */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
};
1132 | |
/* Program the per-priority multicast (SBMM) registers from the per-ASIC
 * MM table. Returns 0, or a negative errno from the first failed write.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: mc->min_buff);
		mlxsw_reg_sbmm_pack(payload: sbmm_pl, prio: i, min_buff, max_buff: mc->max_buff,
				    pool: des->pool);
		err = mlxsw_reg_write(mlxsw_core: mlxsw_sp->core, MLXSW_REG(sbmm), payload: sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
1158 | |
1159 | static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp, |
1160 | u16 *p_ingress_len, u16 *p_egress_len) |
1161 | { |
1162 | int i; |
1163 | |
1164 | for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) { |
1165 | if (mlxsw_sp->sb_vals->pool_dess[i].dir == |
1166 | MLXSW_REG_SBXX_DIR_INGRESS) |
1167 | (*p_ingress_len)++; |
1168 | else |
1169 | (*p_egress_len)++; |
1170 | } |
1171 | |
1172 | WARN(*p_egress_len == 0, "No egress pools\n" ); |
1173 | } |
1174 | |
/* Aggregated shared-buffer defaults for Spectrum-1 ASICs. */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
1190 | |
/* Aggregated shared-buffer defaults for Spectrum-2 and later ASICs. */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
1206 | |
1207 | static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed) |
1208 | { |
1209 | return mtu * 5 / 2; |
1210 | } |
1211 | |
1212 | static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor) |
1213 | { |
1214 | return 3 * mtu + buffer_factor * speed / 1000; |
1215 | } |
1216 | |
1217 | #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 |
1218 | |
1219 | static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed) |
1220 | { |
1221 | int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; |
1222 | |
1223 | return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, buffer_factor: factor); |
1224 | } |
1225 | |
1226 | #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 |
1227 | |
1228 | static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed) |
1229 | { |
1230 | int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; |
1231 | |
1232 | return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, buffer_factor: factor); |
1233 | } |
1234 | |
/* Per-generation shared-buffer operations; currently only the internal
 * buffer sizing hook differs between ASICs.
 */
const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
	.int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
};

const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
	.int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
};

const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
	.int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
};
1246 | |
/* Driver-level shared-buffer initialization: read buffer-related device
 * resources, program pool/CM/PM/MM defaults, and register the shared
 * buffer with devlink. Returns 0 or a negative errno; on failure all
 * acquired resources are released via the goto-cleanup chain.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u32 max_headroom_size;
	u16 ing_pool_count = 0;
	u16 eg_pool_count = 0;
	int err;

	/* All three resources below are mandatory for buffer management. */
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(size: sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   GUARANTEED_SHARED_BUFFER);
	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					       MAX_HEADROOM_SIZE);
	/* Round down, because this limit must not be overstepped. */
	mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
						mlxsw_sp->sb->cell_size;

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, prs: mlxsw_sp->sb_vals->prs,
				   pool_dess: mlxsw_sp->sb_vals->pool_dess,
				   prs_len: mlxsw_sp->sb_vals->pool_count);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_pms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(mlxsw_sp, p_ingress_len: &ing_pool_count, p_egress_len: &eg_pool_count);
	err = devl_sb_register(devlink: priv_to_devlink(priv: mlxsw_sp->core), sb_index: 0,
			       size: mlxsw_sp->sb->sb_size,
			       ingress_pools_count: ing_pool_count,
			       egress_pools_count: eg_pool_count,
			       MLXSW_SP_SB_ING_TC_COUNT,
			       MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(objp: mlxsw_sp->sb);
	return err;
}
1314 | |
/* Tear down shared-buffer state in reverse order of mlxsw_sp_buffers_init(). */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devl_sb_unregister(devlink: priv_to_devlink(priv: mlxsw_sp->core), sb_index: 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(objp: mlxsw_sp->sb);
}
1321 | |
/* Per-port buffer initialization: allocate headroom state and apply the
 * headroom, CM and PM defaults. Returns 0 or a negative errno; the
 * headroom allocation is freed on any failure.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	mlxsw_sp_port->hdroom = kzalloc(size: sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
	if (!mlxsw_sp_port->hdroom)
		return -ENOMEM;
	mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		goto err_headroom_init;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		goto err_port_sb_cms_init;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
	if (err)
		goto err_port_sb_pms_init;
	return 0;

err_port_sb_pms_init:
err_port_sb_cms_init:
err_headroom_init:
	kfree(objp: mlxsw_sp_port->hdroom);
	return err;
}
1348 | |
/* Release the per-port headroom state allocated by
 * mlxsw_sp_port_buffers_init().
 */
void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(objp: mlxsw_sp_port->hdroom);
}
1353 | |
/* devlink .sb_pool_get: report one pool's type, size (in bytes),
 * threshold type and cell size from the cached pool state.
 */
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	enum mlxsw_reg_sbxx_dir dir;
	struct mlxsw_sp_sb_pr *pr;

	dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	/* The register direction / mode enums intentionally mirror the
	 * devlink pool type / threshold type enums.
	 */
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, cells: pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	pool_info->cell_size = mlxsw_sp->sb->cell_size;
	return 0;
}
1370 | |
/* devlink .sb_pool_set: change a pool's size and threshold type, subject
 * to the total buffer size and the per-pool freeze flags set in the
 * per-ASIC defaults. Returns 0 or a negative errno (-EINVAL with extack
 * on a rejected request).
 */
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: size);
	const struct mlxsw_sp_sb_pr *pr;
	enum mlxsw_reg_sbpr_mode mode;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	pr = &mlxsw_sp->sb_vals->prs[pool_index];

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
				      GUARANTEED_SHARED_BUFFER)) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size" );
		return -EINVAL;
	}

	if (pr->freeze_mode && pr->mode != mode) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden" );
		return -EINVAL;
	}

	if (pr->freeze_size && pr->size != size) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden" );
		return -EINVAL;
	}

	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
				    size: pool_size, infi_size: false);
}
1403 | |
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

/* Convert a hardware max_buff value to the threshold exposed to devlink:
 * for dynamic pools, undo the alpha offset (note the offset is negative,
 * so the subtraction adds 2); for static pools, convert cells to bytes.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, cells: max_buff);
}
1415 | |
/* Convert a devlink threshold to the hardware max_buff value in
 * *p_max_buff: for dynamic pools, apply the alpha offset and range-check
 * the result; for static pools, convert bytes to cells. Returns 0 or
 * -EINVAL (with extack) on an out-of-range dynamic threshold.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				    u32 threshold, u32 *p_max_buff,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value" );
			return -EINVAL;
		}
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, bytes: threshold);
	}
	return 0;
}
1437 | |
/* devlink .sb_port_pool_get: report a port's per-pool threshold, derived
 * from the cached PM state.
 */
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
						 max_buff: pm->max_buff);
	return 0;
}
1453 | |
/* devlink .sb_port_pool_set: set a port's per-pool threshold. Rejected
 * for the CPU port; otherwise the devlink threshold is converted to the
 * hardware encoding and written. Returns 0 or a negative errno.
 */
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	u32 max_buff;
	int err;

	if (local_port == MLXSW_PORT_CPU_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden" );
		return -EINVAL;
	}

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, p_max_buff: &max_buff, extack);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
				    min_buff: 0, max_buff);
}
1478 | |
/* devlink .sb_tc_pool_bind_get: report which pool a port TC is bound to
 * and its threshold, from the cached CM state.
 */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index: cm->pool_index,
						 max_buff: cm->max_buff);
	*p_pool_index = cm->pool_index;
	return 0;
}
1498 | |
/* devlink .sb_tc_pool_bind_set: bind a port TC to a pool with a given
 * threshold. Rejected for the CPU port, for direction mismatches, and
 * for CMs whose pool binding or threshold is frozen by the per-ASIC
 * defaults. Returns 0 or a negative errno (-EINVAL with extack).
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	const struct mlxsw_sp_sb_cm *cm;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u32 max_buff;
	int err;

	if (local_port == MLXSW_PORT_CPU_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden" );
		return -EINVAL;
	}

	if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
		NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden" );
		return -EINVAL;
	}

	/* Consult the default CM for this TC to honor its freeze flags. */
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
	else
		cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];

	if (cm->freeze_pool && cm->pool_index != pool_index) {
		NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden" );
		return -EINVAL;
	}

	if (cm->freeze_thresh && cm->max_buff != threshold) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden" );
		return -EINVAL;
	}

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, p_max_buff: &max_buff, extack);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
				    min_buff: 0, max_buff, infi_max: false, pool_index);
}
1548 | |
/* Maximum number of ports whose per-TC records fit in one SBSR query. */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

/* Context passed (packed into an unsigned long) from the occupancy
 * snapshot to its query callback: how many ports the batch covered and
 * which local port the batch started at.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u16 local_port_1;
};
1557 | |
/* Completion callback for one batched SBSR occupancy query: unpack the
 * returned records into the cached per-CM occupancy values. Records are
 * laid out as all ingress TCs of each batched port, then all egress TCs
 * of each batched port, so the port range is walked twice.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u16 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* The context was packed into the unsigned long cb_priv. */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	/* First pass: ingress records. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port == MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			masked_count++;
			continue;
		}
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff: i,
						dir: MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(payload: sbsr_pl, rec_index: rec_index++,
						  p_buff_occupancy: &cm->occ.cur, p_max_buff_occupancy: &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	/* Second pass: egress records for the same batch of ports. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff: i,
						dir: MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(payload: sbsr_pl, rec_index: rec_index++,
						  p_buff_occupancy: &cm->occ.cur, p_max_buff_occupancy: &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
1606 | |
/* devlink .sb_occ_snapshot: query current and maximum occupancy for all
 * ports. Ports are processed in batches limited by MASKED_COUNT_MAX
 * records per SBSR query and by the register's port-page boundary; each
 * batch is issued asynchronously and unpacked by
 * mlxsw_sp_sb_sr_occ_query_cb(). Returns 0 or a negative errno after
 * waiting for the whole bulk list.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u16 local_port, local_port_1, last_local_port;
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count, current_page = 0;
	unsigned long cb_priv = 0;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(payload: sbsr_pl, clr: false);
	mlxsw_reg_sbsr_port_page_set(buf: sbsr_pl, val: current_page);
	/* Last local port covered by the current port page. */
	last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
			  MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;

	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(buf: sbsr_pl, index: i, val: 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(buf: sbsr_pl, index: i, val: 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port > last_local_port) {
			/* Port belongs to the next page; flush this batch. */
			current_page++;
			goto do_query;
		}
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(buf: sbsr_pl,
							     index: local_port, val: 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(buf: sbsr_pl, index: local_port, val: 1);
		/* Per-{port, pool} occupancy is queried separately. */
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, pool_index: i,
						       bulk_list: &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), payload: sbsr_pl,
				    bulk_list: &bulk_list, cb: mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	err2 = mlxsw_reg_trans_bulk_wait(bulk_list: &bulk_list);
	if (!err)
		err = err2;
	kfree(objp: sbsr_pl);
	return err;
}
1682 | |
/* devlink .sb_occ_max_clear: reset the maximum-occupancy watermarks for
 * all ports. Uses the same batching scheme as mlxsw_sp_sb_occ_snapshot()
 * but issues the SBSR queries with the clear bit set and no unpacking
 * callback. Returns 0 or a negative errno after waiting for the whole
 * bulk list.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u16 local_port, last_local_port;
	LIST_HEAD(bulk_list);
	unsigned int masked_count;
	u8 current_page = 0;
	char *sbsr_pl;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	masked_count = 0;
	mlxsw_reg_sbsr_pack(payload: sbsr_pl, clr: true);
	mlxsw_reg_sbsr_port_page_set(buf: sbsr_pl, val: current_page);
	/* Last local port covered by the current port page. */
	last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
			  MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;

	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(buf: sbsr_pl, index: i, val: 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(buf: sbsr_pl, index: i, val: 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port > last_local_port) {
			/* Port belongs to the next page; flush this batch. */
			current_page++;
			goto do_query;
		}
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(buf: sbsr_pl,
							     index: local_port, val: 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(buf: sbsr_pl, index: local_port, val: 1);
		/* Per-{port, pool} watermarks are cleared separately. */
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, pool_index: i,
						       bulk_list: &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), payload: sbsr_pl,
				    bulk_list: &bulk_list, NULL, cb_priv: 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	err2 = mlxsw_reg_trans_bulk_wait(bulk_list: &bulk_list);
	if (!err)
		err = err2;
	kfree(objp: sbsr_pl);
	return err;
}
1752 | |
1753 | int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, |
1754 | unsigned int sb_index, u16 pool_index, |
1755 | u32 *p_cur, u32 *p_max) |
1756 | { |
1757 | struct mlxsw_sp_port *mlxsw_sp_port = |
1758 | mlxsw_core_port_driver_priv(mlxsw_core_port); |
1759 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
1760 | u16 local_port = mlxsw_sp_port->local_port; |
1761 | struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, |
1762 | pool_index); |
1763 | |
1764 | *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cells: pm->occ.cur); |
1765 | *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cells: pm->occ.max); |
1766 | return 0; |
1767 | } |
1768 | |
1769 | int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, |
1770 | unsigned int sb_index, u16 tc_index, |
1771 | enum devlink_sb_pool_type pool_type, |
1772 | u32 *p_cur, u32 *p_max) |
1773 | { |
1774 | struct mlxsw_sp_port *mlxsw_sp_port = |
1775 | mlxsw_core_port_driver_priv(mlxsw_core_port); |
1776 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
1777 | u16 local_port = mlxsw_sp_port->local_port; |
1778 | u8 pg_buff = tc_index; |
1779 | enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; |
1780 | struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, |
1781 | pg_buff, dir); |
1782 | |
1783 | *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cells: cm->occ.cur); |
1784 | *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cells: cm->occ.max); |
1785 | return 0; |
1786 | } |
1787 | |