1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 |
2 | /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ |
3 | |
4 | #include <linux/kernel.h> |
5 | #include <linux/module.h> |
6 | #include <linux/device.h> |
7 | #include <linux/export.h> |
8 | #include <linux/err.h> |
9 | #include <linux/if_link.h> |
10 | #include <linux/netdevice.h> |
11 | #include <linux/completion.h> |
12 | #include <linux/skbuff.h> |
13 | #include <linux/etherdevice.h> |
14 | #include <linux/types.h> |
15 | #include <linux/string.h> |
16 | #include <linux/gfp.h> |
17 | #include <linux/random.h> |
18 | #include <linux/jiffies.h> |
19 | #include <linux/mutex.h> |
20 | #include <linux/rcupdate.h> |
21 | #include <linux/slab.h> |
22 | #include <linux/workqueue.h> |
23 | #include <linux/firmware.h> |
24 | #include <asm/byteorder.h> |
25 | #include <net/devlink.h> |
26 | #include <trace/events/devlink.h> |
27 | |
28 | #include "core.h" |
29 | #include "core_env.h" |
30 | #include "item.h" |
31 | #include "cmd.h" |
32 | #include "port.h" |
33 | #include "trap.h" |
34 | #include "emad.h" |
35 | #include "reg.h" |
36 | #include "resources.h" |
37 | #include "../mlxfw/mlxfw.h" |
38 | |
39 | static LIST_HEAD(mlxsw_core_driver_list); |
40 | static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock); |
41 | |
42 | static const char mlxsw_core_driver_name[] = "mlxsw_core" ; |
43 | |
44 | static struct workqueue_struct *mlxsw_wq; |
45 | static struct workqueue_struct *mlxsw_owq; |
46 | |
47 | struct mlxsw_core_port { |
48 | struct devlink_port devlink_port; |
49 | void *port_driver_priv; |
50 | u16 local_port; |
51 | struct mlxsw_linecard *linecard; |
52 | }; |
53 | |
54 | void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) |
55 | { |
56 | return mlxsw_core_port->port_driver_priv; |
57 | } |
58 | EXPORT_SYMBOL(mlxsw_core_port_driver_priv); |
59 | |
60 | static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port) |
61 | { |
62 | return mlxsw_core_port->port_driver_priv != NULL; |
63 | } |
64 | |
65 | struct mlxsw_core { |
66 | struct mlxsw_driver *driver; |
67 | const struct mlxsw_bus *bus; |
68 | void *bus_priv; |
69 | const struct mlxsw_bus_info *bus_info; |
70 | struct workqueue_struct *emad_wq; |
71 | struct list_head rx_listener_list; |
72 | struct list_head event_listener_list; |
73 | struct list_head irq_event_handler_list; |
74 | struct mutex irq_event_handler_lock; /* Locks access to handlers list */ |
75 | struct { |
76 | atomic64_t tid; |
77 | struct list_head trans_list; |
78 | spinlock_t trans_list_lock; /* protects trans_list writes */ |
79 | bool use_emad; |
80 | bool enable_string_tlv; |
81 | bool enable_latency_tlv; |
82 | } emad; |
83 | struct { |
84 | u16 *mapping; /* lag_id+port_index to local_port mapping */ |
85 | } lag; |
86 | struct mlxsw_res res; |
87 | struct mlxsw_hwmon *hwmon; |
88 | struct mlxsw_thermal *thermal; |
89 | struct mlxsw_linecards *linecards; |
90 | struct mlxsw_core_port *ports; |
91 | unsigned int max_ports; |
92 | atomic_t active_ports_count; |
93 | bool fw_flash_in_progress; |
94 | struct { |
95 | struct devlink_health_reporter *fw_fatal; |
96 | } health; |
97 | struct mlxsw_env *env; |
98 | unsigned long driver_priv[]; |
99 | /* driver_priv has to be always the last item */ |
100 | }; |
101 | |
102 | struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core) |
103 | { |
104 | return mlxsw_core->linecards; |
105 | } |
106 | |
107 | void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core, |
108 | struct mlxsw_linecards *linecards) |
109 | { |
110 | mlxsw_core->linecards = linecards; |
111 | } |
112 | |
113 | #define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40 |
114 | |
115 | static u64 mlxsw_ports_occ_get(void *priv) |
116 | { |
117 | struct mlxsw_core *mlxsw_core = priv; |
118 | |
119 | return atomic_read(v: &mlxsw_core->active_ports_count); |
120 | } |
121 | |
122 | static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core) |
123 | { |
124 | struct devlink *devlink = priv_to_devlink(priv: mlxsw_core); |
125 | struct devlink_resource_size_params ports_num_params; |
126 | u32 max_ports; |
127 | |
128 | max_ports = mlxsw_core->max_ports - 1; |
129 | devlink_resource_size_params_init(size_params: &ports_num_params, size_min: max_ports, |
130 | size_max: max_ports, size_granularity: 1, |
131 | unit: DEVLINK_RESOURCE_UNIT_ENTRY); |
132 | |
133 | return devl_resource_register(devlink, |
134 | DEVLINK_RESOURCE_GENERIC_NAME_PORTS, |
135 | resource_size: max_ports, resource_id: MLXSW_CORE_RESOURCE_PORTS, |
136 | DEVLINK_RESOURCE_ID_PARENT_TOP, |
137 | size_params: &ports_num_params); |
138 | } |
139 | |
140 | static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload) |
141 | { |
142 | struct devlink *devlink = priv_to_devlink(priv: mlxsw_core); |
143 | int err; |
144 | |
145 | /* Switch ports are numbered from 1 to queried value */ |
146 | if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT)) |
147 | mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core, |
148 | MAX_SYSTEM_PORT) + 1; |
149 | else |
150 | mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1; |
151 | |
152 | mlxsw_core->ports = kcalloc(n: mlxsw_core->max_ports, |
153 | size: sizeof(struct mlxsw_core_port), GFP_KERNEL); |
154 | if (!mlxsw_core->ports) |
155 | return -ENOMEM; |
156 | |
157 | if (!reload) { |
158 | err = mlxsw_core_resources_ports_register(mlxsw_core); |
159 | if (err) |
160 | goto err_resources_ports_register; |
161 | } |
162 | atomic_set(v: &mlxsw_core->active_ports_count, i: 0); |
163 | devl_resource_occ_get_register(devlink, resource_id: MLXSW_CORE_RESOURCE_PORTS, |
164 | occ_get: mlxsw_ports_occ_get, occ_get_priv: mlxsw_core); |
165 | |
166 | return 0; |
167 | |
168 | err_resources_ports_register: |
169 | kfree(objp: mlxsw_core->ports); |
170 | return err; |
171 | } |
172 | |
173 | static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload) |
174 | { |
175 | struct devlink *devlink = priv_to_devlink(priv: mlxsw_core); |
176 | |
177 | devl_resource_occ_get_unregister(devlink, resource_id: MLXSW_CORE_RESOURCE_PORTS); |
178 | if (!reload) |
179 | devl_resources_unregister(devlink: priv_to_devlink(priv: mlxsw_core)); |
180 | |
181 | kfree(objp: mlxsw_core->ports); |
182 | } |
183 | |
184 | unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core) |
185 | { |
186 | return mlxsw_core->max_ports; |
187 | } |
188 | EXPORT_SYMBOL(mlxsw_core_max_ports); |
189 | |
190 | int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag) |
191 | { |
192 | struct mlxsw_driver *driver = mlxsw_core->driver; |
193 | |
194 | if (driver->profile->used_max_lag) { |
195 | *p_max_lag = driver->profile->max_lag; |
196 | return 0; |
197 | } |
198 | |
199 | if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG)) |
200 | return -EIO; |
201 | |
202 | *p_max_lag = MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG); |
203 | return 0; |
204 | } |
205 | EXPORT_SYMBOL(mlxsw_core_max_lag); |
206 | |
207 | enum mlxsw_cmd_mbox_config_profile_lag_mode |
208 | mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core) |
209 | { |
210 | return mlxsw_core->bus->lag_mode(mlxsw_core->bus_priv); |
211 | } |
212 | EXPORT_SYMBOL(mlxsw_core_lag_mode); |
213 | |
214 | void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core) |
215 | { |
216 | return mlxsw_core->driver_priv; |
217 | } |
218 | EXPORT_SYMBOL(mlxsw_core_driver_priv); |
219 | |
220 | bool |
221 | mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev, |
222 | const struct mlxsw_fw_rev *req_rev) |
223 | { |
224 | return rev->minor > req_rev->minor || |
225 | (rev->minor == req_rev->minor && |
226 | rev->subminor >= req_rev->subminor); |
227 | } |
228 | EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate); |
229 | |
230 | struct mlxsw_rx_listener_item { |
231 | struct list_head list; |
232 | struct mlxsw_rx_listener rxl; |
233 | void *priv; |
234 | bool enabled; |
235 | }; |
236 | |
237 | struct mlxsw_event_listener_item { |
238 | struct list_head list; |
239 | struct mlxsw_core *mlxsw_core; |
240 | struct mlxsw_event_listener el; |
241 | void *priv; |
242 | }; |
243 | |
244 | static const u8 mlxsw_core_trap_groups[] = { |
245 | MLXSW_REG_HTGT_TRAP_GROUP_EMAD, |
246 | MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT, |
247 | }; |
248 | |
249 | static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core) |
250 | { |
251 | char htgt_pl[MLXSW_REG_HTGT_LEN]; |
252 | int err; |
253 | int i; |
254 | |
255 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
256 | return 0; |
257 | |
258 | for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) { |
259 | mlxsw_reg_htgt_pack(payload: htgt_pl, group: mlxsw_core_trap_groups[i], |
260 | MLXSW_REG_HTGT_INVALID_POLICER, |
261 | MLXSW_REG_HTGT_DEFAULT_PRIORITY, |
262 | MLXSW_REG_HTGT_DEFAULT_TC); |
263 | err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), payload: htgt_pl); |
264 | if (err) |
265 | return err; |
266 | } |
267 | return 0; |
268 | } |
269 | |
270 | /****************** |
271 | * EMAD processing |
272 | ******************/ |
273 | |
274 | /* emad_eth_hdr_dmac |
275 | * Destination MAC in EMAD's Ethernet header. |
276 | * Must be set to 01:02:c9:00:00:01 |
277 | */ |
278 | MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6); |
279 | |
280 | /* emad_eth_hdr_smac |
281 | * Source MAC in EMAD's Ethernet header. |
282 | * Must be set to 00:02:c9:01:02:03 |
283 | */ |
284 | MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6); |
285 | |
286 | /* emad_eth_hdr_ethertype |
287 | * Ethertype in EMAD's Ethernet header. |
288 | * Must be set to 0x8932 |
289 | */ |
290 | MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16); |
291 | |
292 | /* emad_eth_hdr_mlx_proto |
293 | * Mellanox protocol. |
294 | * Must be set to 0x0. |
295 | */ |
296 | MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8); |
297 | |
298 | /* emad_eth_hdr_ver |
299 | * Mellanox protocol version. |
300 | * Must be set to 0x0. |
301 | */ |
302 | MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4); |
303 | |
304 | /* emad_op_tlv_type |
305 | * Type of the TLV. |
306 | * Must be set to 0x1 (operation TLV). |
307 | */ |
308 | MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5); |
309 | |
310 | /* emad_op_tlv_len |
311 | * Length of the operation TLV in u32. |
312 | * Must be set to 0x4. |
313 | */ |
314 | MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11); |
315 | |
316 | /* emad_op_tlv_dr |
317 | * Direct route bit. Setting to 1 indicates the EMAD is a direct route |
318 | * EMAD. DR TLV must follow. |
319 | * |
320 | * Note: Currently not supported and must not be set. |
321 | */ |
322 | MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1); |
323 | |
324 | /* emad_op_tlv_status |
325 | * Returned status in case of EMAD response. Must be set to 0 in case |
326 | * of EMAD request. |
327 | * 0x0 - success |
328 | * 0x1 - device is busy. Requester should retry |
329 | * 0x2 - Mellanox protocol version not supported |
330 | * 0x3 - unknown TLV |
331 | * 0x4 - register not supported |
332 | * 0x5 - operation class not supported |
333 | * 0x6 - EMAD method not supported |
334 | * 0x7 - bad parameter (e.g. port out of range) |
335 | * 0x8 - resource not available |
336 | * 0x9 - message receipt acknowledgment. Requester should retry |
337 | * 0x70 - internal error |
338 | */ |
339 | MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7); |
340 | |
341 | /* emad_op_tlv_register_id |
342 | * Register ID of register within register TLV. |
343 | */ |
344 | MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16); |
345 | |
346 | /* emad_op_tlv_r |
347 | * Response bit. Setting to 1 indicates Response, otherwise request. |
348 | */ |
349 | MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1); |
350 | |
351 | /* emad_op_tlv_method |
352 | * EMAD method type. |
353 | * 0x1 - query |
354 | * 0x2 - write |
355 | * 0x3 - send (currently not supported) |
356 | * 0x4 - event |
357 | */ |
358 | MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7); |
359 | |
360 | /* emad_op_tlv_class |
361 | * EMAD operation class. Must be set to 0x1 (REG_ACCESS). |
362 | */ |
363 | MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8); |
364 | |
365 | /* emad_op_tlv_tid |
366 | * EMAD transaction ID. Used for pairing request and response EMADs. |
367 | */ |
368 | MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64); |
369 | |
370 | /* emad_string_tlv_type |
371 | * Type of the TLV. |
372 | * Must be set to 0x2 (string TLV). |
373 | */ |
374 | MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5); |
375 | |
376 | /* emad_string_tlv_len |
377 | * Length of the string TLV in u32. |
378 | */ |
379 | MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11); |
380 | |
381 | #define MLXSW_EMAD_STRING_TLV_STRING_LEN 128 |
382 | |
383 | /* emad_string_tlv_string |
384 | * String provided by the device's firmware in case of erroneous register access |
385 | */ |
386 | MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04, |
387 | MLXSW_EMAD_STRING_TLV_STRING_LEN); |
388 | |
389 | /* emad_latency_tlv_type |
390 | * Type of the TLV. |
391 | * Must be set to 0x4 (latency TLV). |
392 | */ |
393 | MLXSW_ITEM32(emad, latency_tlv, type, 0x00, 27, 5); |
394 | |
395 | /* emad_latency_tlv_len |
396 | * Length of the latency TLV in u32. |
397 | */ |
398 | MLXSW_ITEM32(emad, latency_tlv, len, 0x00, 16, 11); |
399 | |
400 | /* emad_latency_tlv_latency_time |
401 | * EMAD latency time in units of uSec. |
402 | */ |
403 | MLXSW_ITEM32(emad, latency_tlv, latency_time, 0x04, 0, 32); |
404 | |
405 | /* emad_reg_tlv_type |
406 | * Type of the TLV. |
407 | * Must be set to 0x3 (register TLV). |
408 | */ |
409 | MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5); |
410 | |
411 | /* emad_reg_tlv_len |
412 | * Length of the operation TLV in u32. |
413 | */ |
414 | MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11); |
415 | |
416 | /* emad_end_tlv_type |
417 | * Type of the TLV. |
418 | * Must be set to 0x0 (end TLV). |
419 | */ |
420 | MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5); |
421 | |
422 | /* emad_end_tlv_len |
423 | * Length of the end TLV in u32. |
424 | * Must be set to 1. |
425 | */ |
426 | MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11); |
427 | |
428 | enum mlxsw_core_reg_access_type { |
429 | MLXSW_CORE_REG_ACCESS_TYPE_QUERY, |
430 | MLXSW_CORE_REG_ACCESS_TYPE_WRITE, |
431 | }; |
432 | |
433 | static inline const char * |
434 | mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type) |
435 | { |
436 | switch (type) { |
437 | case MLXSW_CORE_REG_ACCESS_TYPE_QUERY: |
438 | return "query" ; |
439 | case MLXSW_CORE_REG_ACCESS_TYPE_WRITE: |
440 | return "write" ; |
441 | } |
442 | BUG(); |
443 | } |
444 | |
445 | static void mlxsw_emad_pack_end_tlv(char *end_tlv) |
446 | { |
447 | mlxsw_emad_end_tlv_type_set(buf: end_tlv, val: MLXSW_EMAD_TLV_TYPE_END); |
448 | mlxsw_emad_end_tlv_len_set(buf: end_tlv, MLXSW_EMAD_END_TLV_LEN); |
449 | } |
450 | |
451 | static void mlxsw_emad_pack_reg_tlv(char *reg_tlv, |
452 | const struct mlxsw_reg_info *reg, |
453 | char *payload) |
454 | { |
455 | mlxsw_emad_reg_tlv_type_set(buf: reg_tlv, val: MLXSW_EMAD_TLV_TYPE_REG); |
456 | mlxsw_emad_reg_tlv_len_set(buf: reg_tlv, val: reg->len / sizeof(u32) + 1); |
457 | memcpy(reg_tlv + sizeof(u32), payload, reg->len); |
458 | } |
459 | |
460 | static void mlxsw_emad_pack_string_tlv(char *string_tlv) |
461 | { |
462 | mlxsw_emad_string_tlv_type_set(buf: string_tlv, val: MLXSW_EMAD_TLV_TYPE_STRING); |
463 | mlxsw_emad_string_tlv_len_set(buf: string_tlv, MLXSW_EMAD_STRING_TLV_LEN); |
464 | } |
465 | |
466 | static void mlxsw_emad_pack_op_tlv(char *op_tlv, |
467 | const struct mlxsw_reg_info *reg, |
468 | enum mlxsw_core_reg_access_type type, |
469 | u64 tid) |
470 | { |
471 | mlxsw_emad_op_tlv_type_set(buf: op_tlv, val: MLXSW_EMAD_TLV_TYPE_OP); |
472 | mlxsw_emad_op_tlv_len_set(buf: op_tlv, MLXSW_EMAD_OP_TLV_LEN); |
473 | mlxsw_emad_op_tlv_dr_set(buf: op_tlv, val: 0); |
474 | mlxsw_emad_op_tlv_status_set(buf: op_tlv, val: 0); |
475 | mlxsw_emad_op_tlv_register_id_set(buf: op_tlv, val: reg->id); |
476 | mlxsw_emad_op_tlv_r_set(buf: op_tlv, val: MLXSW_EMAD_OP_TLV_REQUEST); |
477 | if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY) |
478 | mlxsw_emad_op_tlv_method_set(buf: op_tlv, |
479 | val: MLXSW_EMAD_OP_TLV_METHOD_QUERY); |
480 | else |
481 | mlxsw_emad_op_tlv_method_set(buf: op_tlv, |
482 | val: MLXSW_EMAD_OP_TLV_METHOD_WRITE); |
483 | mlxsw_emad_op_tlv_class_set(buf: op_tlv, |
484 | val: MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS); |
485 | mlxsw_emad_op_tlv_tid_set(buf: op_tlv, val: tid); |
486 | } |
487 | |
488 | static void mlxsw_emad_pack_latency_tlv(char *latency_tlv) |
489 | { |
490 | mlxsw_emad_latency_tlv_type_set(buf: latency_tlv, val: MLXSW_EMAD_TLV_TYPE_LATENCY); |
491 | mlxsw_emad_latency_tlv_len_set(buf: latency_tlv, MLXSW_EMAD_LATENCY_TLV_LEN); |
492 | } |
493 | |
494 | static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb) |
495 | { |
496 | char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN); |
497 | |
498 | mlxsw_emad_eth_hdr_dmac_memcpy_to(buf: eth_hdr, MLXSW_EMAD_EH_DMAC); |
499 | mlxsw_emad_eth_hdr_smac_memcpy_to(buf: eth_hdr, MLXSW_EMAD_EH_SMAC); |
500 | mlxsw_emad_eth_hdr_ethertype_set(buf: eth_hdr, MLXSW_EMAD_EH_ETHERTYPE); |
501 | mlxsw_emad_eth_hdr_mlx_proto_set(buf: eth_hdr, MLXSW_EMAD_EH_MLX_PROTO); |
502 | mlxsw_emad_eth_hdr_ver_set(buf: eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION); |
503 | |
504 | skb_reset_mac_header(skb); |
505 | |
506 | return 0; |
507 | } |
508 | |
509 | static void mlxsw_emad_construct(const struct mlxsw_core *mlxsw_core, |
510 | struct sk_buff *skb, |
511 | const struct mlxsw_reg_info *reg, |
512 | char *payload, |
513 | enum mlxsw_core_reg_access_type type, u64 tid) |
514 | { |
515 | char *buf; |
516 | |
517 | buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32)); |
518 | mlxsw_emad_pack_end_tlv(end_tlv: buf); |
519 | |
520 | buf = skb_push(skb, len: reg->len + sizeof(u32)); |
521 | mlxsw_emad_pack_reg_tlv(reg_tlv: buf, reg, payload); |
522 | |
523 | if (mlxsw_core->emad.enable_latency_tlv) { |
524 | buf = skb_push(skb, MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32)); |
525 | mlxsw_emad_pack_latency_tlv(latency_tlv: buf); |
526 | } |
527 | |
528 | if (mlxsw_core->emad.enable_string_tlv) { |
529 | buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32)); |
530 | mlxsw_emad_pack_string_tlv(string_tlv: buf); |
531 | } |
532 | |
533 | buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)); |
534 | mlxsw_emad_pack_op_tlv(op_tlv: buf, reg, type, tid); |
535 | |
536 | mlxsw_emad_construct_eth_hdr(skb); |
537 | } |
538 | |
539 | struct mlxsw_emad_tlv_offsets { |
540 | u16 op_tlv; |
541 | u16 string_tlv; |
542 | u16 latency_tlv; |
543 | u16 reg_tlv; |
544 | }; |
545 | |
546 | static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv) |
547 | { |
548 | u8 tlv_type = mlxsw_emad_string_tlv_type_get(buf: tlv); |
549 | |
550 | return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING; |
551 | } |
552 | |
553 | static bool mlxsw_emad_tlv_is_latency_tlv(const char *tlv) |
554 | { |
555 | u8 tlv_type = mlxsw_emad_latency_tlv_type_get(buf: tlv); |
556 | |
557 | return tlv_type == MLXSW_EMAD_TLV_TYPE_LATENCY; |
558 | } |
559 | |
560 | static void mlxsw_emad_tlv_parse(struct sk_buff *skb) |
561 | { |
562 | struct mlxsw_emad_tlv_offsets *offsets = |
563 | (struct mlxsw_emad_tlv_offsets *) skb->cb; |
564 | |
565 | offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN; |
566 | offsets->string_tlv = 0; |
567 | offsets->latency_tlv = 0; |
568 | |
569 | offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN + |
570 | MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); |
571 | |
572 | /* If string TLV is present, it must come after the operation TLV. */ |
573 | if (mlxsw_emad_tlv_is_string_tlv(tlv: skb->data + offsets->reg_tlv)) { |
574 | offsets->string_tlv = offsets->reg_tlv; |
575 | offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32); |
576 | } |
577 | |
578 | if (mlxsw_emad_tlv_is_latency_tlv(tlv: skb->data + offsets->reg_tlv)) { |
579 | offsets->latency_tlv = offsets->reg_tlv; |
580 | offsets->reg_tlv += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32); |
581 | } |
582 | } |
583 | |
584 | static char *mlxsw_emad_op_tlv(const struct sk_buff *skb) |
585 | { |
586 | struct mlxsw_emad_tlv_offsets *offsets = |
587 | (struct mlxsw_emad_tlv_offsets *) skb->cb; |
588 | |
589 | return ((char *) (skb->data + offsets->op_tlv)); |
590 | } |
591 | |
592 | static char *mlxsw_emad_string_tlv(const struct sk_buff *skb) |
593 | { |
594 | struct mlxsw_emad_tlv_offsets *offsets = |
595 | (struct mlxsw_emad_tlv_offsets *) skb->cb; |
596 | |
597 | if (!offsets->string_tlv) |
598 | return NULL; |
599 | |
600 | return ((char *) (skb->data + offsets->string_tlv)); |
601 | } |
602 | |
603 | static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb) |
604 | { |
605 | struct mlxsw_emad_tlv_offsets *offsets = |
606 | (struct mlxsw_emad_tlv_offsets *) skb->cb; |
607 | |
608 | return ((char *) (skb->data + offsets->reg_tlv)); |
609 | } |
610 | |
611 | static char *mlxsw_emad_reg_payload(const char *reg_tlv) |
612 | { |
613 | return ((char *) (reg_tlv + sizeof(u32))); |
614 | } |
615 | |
616 | static char *mlxsw_emad_reg_payload_cmd(const char *mbox) |
617 | { |
618 | return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32))); |
619 | } |
620 | |
621 | static u64 mlxsw_emad_get_tid(const struct sk_buff *skb) |
622 | { |
623 | char *op_tlv; |
624 | |
625 | op_tlv = mlxsw_emad_op_tlv(skb); |
626 | return mlxsw_emad_op_tlv_tid_get(buf: op_tlv); |
627 | } |
628 | |
629 | static bool mlxsw_emad_is_resp(const struct sk_buff *skb) |
630 | { |
631 | char *op_tlv; |
632 | |
633 | op_tlv = mlxsw_emad_op_tlv(skb); |
634 | return (mlxsw_emad_op_tlv_r_get(buf: op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE); |
635 | } |
636 | |
637 | static int mlxsw_emad_process_status(char *op_tlv, |
638 | enum mlxsw_emad_op_tlv_status *p_status) |
639 | { |
640 | *p_status = mlxsw_emad_op_tlv_status_get(buf: op_tlv); |
641 | |
642 | switch (*p_status) { |
643 | case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: |
644 | return 0; |
645 | case MLXSW_EMAD_OP_TLV_STATUS_BUSY: |
646 | case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: |
647 | return -EAGAIN; |
648 | case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: |
649 | case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: |
650 | case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED: |
651 | case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED: |
652 | case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED: |
653 | case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER: |
654 | case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: |
655 | case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: |
656 | default: |
657 | return -EIO; |
658 | } |
659 | } |
660 | |
661 | static int |
662 | mlxsw_emad_process_status_skb(struct sk_buff *skb, |
663 | enum mlxsw_emad_op_tlv_status *p_status) |
664 | { |
665 | return mlxsw_emad_process_status(op_tlv: mlxsw_emad_op_tlv(skb), p_status); |
666 | } |
667 | |
668 | struct mlxsw_reg_trans { |
669 | struct list_head list; |
670 | struct list_head bulk_list; |
671 | struct mlxsw_core *core; |
672 | struct sk_buff *tx_skb; |
673 | struct mlxsw_tx_info tx_info; |
674 | struct delayed_work timeout_dw; |
675 | unsigned int retries; |
676 | u64 tid; |
677 | struct completion completion; |
678 | atomic_t active; |
679 | mlxsw_reg_trans_cb_t *cb; |
680 | unsigned long cb_priv; |
681 | const struct mlxsw_reg_info *reg; |
682 | enum mlxsw_core_reg_access_type type; |
683 | int err; |
684 | char *emad_err_string; |
685 | enum mlxsw_emad_op_tlv_status emad_status; |
686 | struct rcu_head rcu; |
687 | }; |
688 | |
689 | static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb, |
690 | struct mlxsw_reg_trans *trans) |
691 | { |
692 | char *string_tlv; |
693 | char *string; |
694 | |
695 | string_tlv = mlxsw_emad_string_tlv(skb); |
696 | if (!string_tlv) |
697 | return; |
698 | |
699 | trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN, |
700 | GFP_ATOMIC); |
701 | if (!trans->emad_err_string) |
702 | return; |
703 | |
704 | string = mlxsw_emad_string_tlv_string_data(buf: string_tlv); |
705 | strscpy(p: trans->emad_err_string, q: string, |
706 | MLXSW_EMAD_STRING_TLV_STRING_LEN); |
707 | } |
708 | |
709 | #define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000 |
710 | #define MLXSW_EMAD_TIMEOUT_MS 200 |
711 | |
712 | static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) |
713 | { |
714 | unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); |
715 | |
716 | if (trans->core->fw_flash_in_progress) |
717 | timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS); |
718 | |
719 | queue_delayed_work(wq: trans->core->emad_wq, dwork: &trans->timeout_dw, |
720 | delay: timeout << trans->retries); |
721 | } |
722 | |
723 | static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, |
724 | struct mlxsw_reg_trans *trans) |
725 | { |
726 | struct sk_buff *skb; |
727 | int err; |
728 | |
729 | skb = skb_clone(skb: trans->tx_skb, GFP_KERNEL); |
730 | if (!skb) |
731 | return -ENOMEM; |
732 | |
733 | trace_devlink_hwmsg(devlink: priv_to_devlink(priv: mlxsw_core), incoming: false, type: 0, |
734 | buf: skb->data + mlxsw_core->driver->txhdr_len, |
735 | len: skb->len - mlxsw_core->driver->txhdr_len); |
736 | |
737 | atomic_set(v: &trans->active, i: 1); |
738 | err = mlxsw_core_skb_transmit(mlxsw_core, skb, tx_info: &trans->tx_info); |
739 | if (err) { |
740 | dev_kfree_skb(skb); |
741 | return err; |
742 | } |
743 | mlxsw_emad_trans_timeout_schedule(trans); |
744 | return 0; |
745 | } |
746 | |
747 | static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err) |
748 | { |
749 | struct mlxsw_core *mlxsw_core = trans->core; |
750 | |
751 | dev_kfree_skb(trans->tx_skb); |
752 | spin_lock_bh(lock: &mlxsw_core->emad.trans_list_lock); |
753 | list_del_rcu(entry: &trans->list); |
754 | spin_unlock_bh(lock: &mlxsw_core->emad.trans_list_lock); |
755 | trans->err = err; |
756 | complete(&trans->completion); |
757 | } |
758 | |
759 | static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core, |
760 | struct mlxsw_reg_trans *trans) |
761 | { |
762 | int err; |
763 | |
764 | if (trans->retries < MLXSW_EMAD_MAX_RETRY) { |
765 | trans->retries++; |
766 | err = mlxsw_emad_transmit(mlxsw_core: trans->core, trans); |
767 | if (err == 0) |
768 | return; |
769 | |
770 | if (!atomic_dec_and_test(v: &trans->active)) |
771 | return; |
772 | } else { |
773 | err = -EIO; |
774 | } |
775 | mlxsw_emad_trans_finish(trans, err); |
776 | } |
777 | |
778 | static void mlxsw_emad_trans_timeout_work(struct work_struct *work) |
779 | { |
780 | struct mlxsw_reg_trans *trans = container_of(work, |
781 | struct mlxsw_reg_trans, |
782 | timeout_dw.work); |
783 | |
784 | if (!atomic_dec_and_test(v: &trans->active)) |
785 | return; |
786 | |
787 | mlxsw_emad_transmit_retry(mlxsw_core: trans->core, trans); |
788 | } |
789 | |
790 | static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core, |
791 | struct mlxsw_reg_trans *trans, |
792 | struct sk_buff *skb) |
793 | { |
794 | int err; |
795 | |
796 | if (!atomic_dec_and_test(v: &trans->active)) |
797 | return; |
798 | |
799 | err = mlxsw_emad_process_status_skb(skb, p_status: &trans->emad_status); |
800 | if (err == -EAGAIN) { |
801 | mlxsw_emad_transmit_retry(mlxsw_core, trans); |
802 | } else { |
803 | if (err == 0) { |
804 | char *reg_tlv = mlxsw_emad_reg_tlv(skb); |
805 | |
806 | if (trans->cb) |
807 | trans->cb(mlxsw_core, |
808 | mlxsw_emad_reg_payload(reg_tlv), |
809 | trans->reg->len, trans->cb_priv); |
810 | } else { |
811 | mlxsw_emad_process_string_tlv(skb, trans); |
812 | } |
813 | mlxsw_emad_trans_finish(trans, err); |
814 | } |
815 | } |
816 | |
817 | /* called with rcu read lock held */ |
818 | static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port, |
819 | void *priv) |
820 | { |
821 | struct mlxsw_core *mlxsw_core = priv; |
822 | struct mlxsw_reg_trans *trans; |
823 | |
824 | trace_devlink_hwmsg(devlink: priv_to_devlink(priv: mlxsw_core), incoming: true, type: 0, |
825 | buf: skb->data, len: skb->len); |
826 | |
827 | mlxsw_emad_tlv_parse(skb); |
828 | |
829 | if (!mlxsw_emad_is_resp(skb)) |
830 | goto free_skb; |
831 | |
832 | list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) { |
833 | if (mlxsw_emad_get_tid(skb) == trans->tid) { |
834 | mlxsw_emad_process_response(mlxsw_core, trans, skb); |
835 | break; |
836 | } |
837 | } |
838 | |
839 | free_skb: |
840 | dev_kfree_skb(skb); |
841 | } |
842 | |
843 | static const struct mlxsw_listener mlxsw_emad_rx_listener = |
844 | MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false, |
845 | EMAD, DISCARD); |
846 | |
847 | static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core) |
848 | { |
849 | char mgir_pl[MLXSW_REG_MGIR_LEN]; |
850 | bool string_tlv, latency_tlv; |
851 | int err; |
852 | |
853 | mlxsw_reg_mgir_pack(payload: mgir_pl); |
854 | err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), payload: mgir_pl); |
855 | if (err) |
856 | return err; |
857 | |
858 | string_tlv = mlxsw_reg_mgir_fw_info_string_tlv_get(buf: mgir_pl); |
859 | mlxsw_core->emad.enable_string_tlv = string_tlv; |
860 | |
861 | latency_tlv = mlxsw_reg_mgir_fw_info_latency_tlv_get(buf: mgir_pl); |
862 | mlxsw_core->emad.enable_latency_tlv = latency_tlv; |
863 | |
864 | return 0; |
865 | } |
866 | |
867 | static void mlxsw_emad_tlv_disable(struct mlxsw_core *mlxsw_core) |
868 | { |
869 | mlxsw_core->emad.enable_latency_tlv = false; |
870 | mlxsw_core->emad.enable_string_tlv = false; |
871 | } |
872 | |
873 | static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) |
874 | { |
875 | struct workqueue_struct *emad_wq; |
876 | u64 tid; |
877 | int err; |
878 | |
879 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
880 | return 0; |
881 | |
882 | emad_wq = alloc_workqueue(fmt: "mlxsw_core_emad" , flags: 0, max_active: 0); |
883 | if (!emad_wq) |
884 | return -ENOMEM; |
885 | mlxsw_core->emad_wq = emad_wq; |
886 | |
887 | /* Set the upper 32 bits of the transaction ID field to a random |
888 | * number. This allows us to discard EMADs addressed to other |
889 | * devices. |
890 | */ |
891 | get_random_bytes(buf: &tid, len: 4); |
892 | tid <<= 32; |
893 | atomic64_set(v: &mlxsw_core->emad.tid, i: tid); |
894 | |
895 | INIT_LIST_HEAD(list: &mlxsw_core->emad.trans_list); |
896 | spin_lock_init(&mlxsw_core->emad.trans_list_lock); |
897 | |
898 | err = mlxsw_core_trap_register(mlxsw_core, listener: &mlxsw_emad_rx_listener, |
899 | priv: mlxsw_core); |
900 | if (err) |
901 | goto err_trap_register; |
902 | |
903 | err = mlxsw_emad_tlv_enable(mlxsw_core); |
904 | if (err) |
905 | goto err_emad_tlv_enable; |
906 | |
907 | mlxsw_core->emad.use_emad = true; |
908 | |
909 | return 0; |
910 | |
911 | err_emad_tlv_enable: |
912 | mlxsw_core_trap_unregister(mlxsw_core, listener: &mlxsw_emad_rx_listener, |
913 | priv: mlxsw_core); |
914 | err_trap_register: |
915 | destroy_workqueue(wq: mlxsw_core->emad_wq); |
916 | return err; |
917 | } |
918 | |
919 | static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) |
920 | { |
921 | |
922 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
923 | return; |
924 | |
925 | mlxsw_core->emad.use_emad = false; |
926 | mlxsw_emad_tlv_disable(mlxsw_core); |
927 | mlxsw_core_trap_unregister(mlxsw_core, listener: &mlxsw_emad_rx_listener, |
928 | priv: mlxsw_core); |
929 | destroy_workqueue(wq: mlxsw_core->emad_wq); |
930 | } |
931 | |
932 | static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, |
933 | u16 reg_len) |
934 | { |
935 | struct sk_buff *skb; |
936 | u16 emad_len; |
937 | |
938 | emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN + |
939 | (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) * |
940 | sizeof(u32) + mlxsw_core->driver->txhdr_len); |
941 | if (mlxsw_core->emad.enable_string_tlv) |
942 | emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32); |
943 | if (mlxsw_core->emad.enable_latency_tlv) |
944 | emad_len += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32); |
945 | if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN) |
946 | return NULL; |
947 | |
948 | skb = netdev_alloc_skb(NULL, length: emad_len); |
949 | if (!skb) |
950 | return NULL; |
951 | memset(skb->data, 0, emad_len); |
952 | skb_reserve(skb, len: emad_len); |
953 | |
954 | return skb; |
955 | } |
956 | |
957 | static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, |
958 | const struct mlxsw_reg_info *reg, |
959 | char *payload, |
960 | enum mlxsw_core_reg_access_type type, |
961 | struct mlxsw_reg_trans *trans, |
962 | struct list_head *bulk_list, |
963 | mlxsw_reg_trans_cb_t *cb, |
964 | unsigned long cb_priv, u64 tid) |
965 | { |
966 | struct sk_buff *skb; |
967 | int err; |
968 | |
969 | dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n" , |
970 | tid, reg->id, mlxsw_reg_id_str(reg->id), |
971 | mlxsw_core_reg_access_type_str(type)); |
972 | |
973 | skb = mlxsw_emad_alloc(mlxsw_core, reg_len: reg->len); |
974 | if (!skb) |
975 | return -ENOMEM; |
976 | |
977 | list_add_tail(new: &trans->bulk_list, head: bulk_list); |
978 | trans->core = mlxsw_core; |
979 | trans->tx_skb = skb; |
980 | trans->tx_info.local_port = MLXSW_PORT_CPU_PORT; |
981 | trans->tx_info.is_emad = true; |
982 | INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work); |
983 | trans->tid = tid; |
984 | init_completion(x: &trans->completion); |
985 | trans->cb = cb; |
986 | trans->cb_priv = cb_priv; |
987 | trans->reg = reg; |
988 | trans->type = type; |
989 | |
990 | mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, tid: trans->tid); |
991 | mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info); |
992 | |
993 | spin_lock_bh(lock: &mlxsw_core->emad.trans_list_lock); |
994 | list_add_tail_rcu(new: &trans->list, head: &mlxsw_core->emad.trans_list); |
995 | spin_unlock_bh(lock: &mlxsw_core->emad.trans_list_lock); |
996 | err = mlxsw_emad_transmit(mlxsw_core, trans); |
997 | if (err) |
998 | goto err_out; |
999 | return 0; |
1000 | |
1001 | err_out: |
1002 | spin_lock_bh(lock: &mlxsw_core->emad.trans_list_lock); |
1003 | list_del_rcu(entry: &trans->list); |
1004 | spin_unlock_bh(lock: &mlxsw_core->emad.trans_list_lock); |
1005 | list_del(entry: &trans->bulk_list); |
1006 | dev_kfree_skb(trans->tx_skb); |
1007 | return err; |
1008 | } |
1009 | |
1010 | /***************** |
1011 | * Core functions |
1012 | *****************/ |
1013 | |
1014 | int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver) |
1015 | { |
1016 | spin_lock(lock: &mlxsw_core_driver_list_lock); |
1017 | list_add_tail(new: &mlxsw_driver->list, head: &mlxsw_core_driver_list); |
1018 | spin_unlock(lock: &mlxsw_core_driver_list_lock); |
1019 | return 0; |
1020 | } |
1021 | EXPORT_SYMBOL(mlxsw_core_driver_register); |
1022 | |
1023 | void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver) |
1024 | { |
1025 | spin_lock(lock: &mlxsw_core_driver_list_lock); |
1026 | list_del(entry: &mlxsw_driver->list); |
1027 | spin_unlock(lock: &mlxsw_core_driver_list_lock); |
1028 | } |
1029 | EXPORT_SYMBOL(mlxsw_core_driver_unregister); |
1030 | |
1031 | static struct mlxsw_driver *__driver_find(const char *kind) |
1032 | { |
1033 | struct mlxsw_driver *mlxsw_driver; |
1034 | |
1035 | list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) { |
1036 | if (strcmp(mlxsw_driver->kind, kind) == 0) |
1037 | return mlxsw_driver; |
1038 | } |
1039 | return NULL; |
1040 | } |
1041 | |
1042 | static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind) |
1043 | { |
1044 | struct mlxsw_driver *mlxsw_driver; |
1045 | |
1046 | spin_lock(lock: &mlxsw_core_driver_list_lock); |
1047 | mlxsw_driver = __driver_find(kind); |
1048 | spin_unlock(lock: &mlxsw_core_driver_list_lock); |
1049 | return mlxsw_driver; |
1050 | } |
1051 | |
1052 | int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, |
1053 | struct mlxfw_dev *mlxfw_dev, |
1054 | const struct firmware *firmware, |
1055 | struct netlink_ext_ack *extack) |
1056 | { |
1057 | int err; |
1058 | |
1059 | mlxsw_core->fw_flash_in_progress = true; |
1060 | err = mlxfw_firmware_flash(mlxfw_dev, firmware, extack); |
1061 | mlxsw_core->fw_flash_in_progress = false; |
1062 | |
1063 | return err; |
1064 | } |
1065 | |
1066 | struct mlxsw_core_fw_info { |
1067 | struct mlxfw_dev mlxfw_dev; |
1068 | struct mlxsw_core *mlxsw_core; |
1069 | }; |
1070 | |
1071 | static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev, |
1072 | u16 component_index, u32 *p_max_size, |
1073 | u8 *p_align_bits, u16 *p_max_write_size) |
1074 | { |
1075 | struct mlxsw_core_fw_info *mlxsw_core_fw_info = |
1076 | container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev); |
1077 | struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core; |
1078 | char mcqi_pl[MLXSW_REG_MCQI_LEN]; |
1079 | int err; |
1080 | |
1081 | mlxsw_reg_mcqi_pack(payload: mcqi_pl, component_index); |
1082 | err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), payload: mcqi_pl); |
1083 | if (err) |
1084 | return err; |
1085 | mlxsw_reg_mcqi_unpack(payload: mcqi_pl, p_cap_max_component_size: p_max_size, p_cap_log_mcda_word_size: p_align_bits, p_cap_mcda_max_write_size: p_max_write_size); |
1086 | |
1087 | *p_align_bits = max_t(u8, *p_align_bits, 2); |
1088 | *p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN); |
1089 | return 0; |
1090 | } |
1091 | |
1092 | static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) |
1093 | { |
1094 | struct mlxsw_core_fw_info *mlxsw_core_fw_info = |
1095 | container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev); |
1096 | struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core; |
1097 | char mcc_pl[MLXSW_REG_MCC_LEN]; |
1098 | u8 control_state; |
1099 | int err; |
1100 | |
1101 | mlxsw_reg_mcc_pack(payload: mcc_pl, instr: 0, component_index: 0, update_handle: 0, component_size: 0); |
1102 | err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl); |
1103 | if (err) |
1104 | return err; |
1105 | |
1106 | mlxsw_reg_mcc_unpack(payload: mcc_pl, p_update_handle: fwhandle, NULL, p_control_state: &control_state); |
1107 | if (control_state != MLXFW_FSM_STATE_IDLE) |
1108 | return -EBUSY; |
1109 | |
1110 | mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, component_index: 0, update_handle: *fwhandle, component_size: 0); |
1111 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl); |
1112 | } |
1113 | |
1114 | static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, |
1115 | u16 component_index, u32 component_size) |
1116 | { |
1117 | struct mlxsw_core_fw_info *mlxsw_core_fw_info = |
1118 | container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev); |
1119 | struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core; |
1120 | char mcc_pl[MLXSW_REG_MCC_LEN]; |
1121 | |
1122 | mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, |
1123 | component_index, update_handle: fwhandle, component_size); |
1124 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl); |
1125 | } |
1126 | |
1127 | static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, |
1128 | u8 *data, u16 size, u32 offset) |
1129 | { |
1130 | struct mlxsw_core_fw_info *mlxsw_core_fw_info = |
1131 | container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev); |
1132 | struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core; |
1133 | char mcda_pl[MLXSW_REG_MCDA_LEN]; |
1134 | |
1135 | mlxsw_reg_mcda_pack(payload: mcda_pl, update_handle: fwhandle, offset, size, data); |
1136 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), payload: mcda_pl); |
1137 | } |
1138 | |
1139 | static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, |
1140 | u16 component_index) |
1141 | { |
1142 | struct mlxsw_core_fw_info *mlxsw_core_fw_info = |
1143 | container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev); |
1144 | struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core; |
1145 | char mcc_pl[MLXSW_REG_MCC_LEN]; |
1146 | |
1147 | mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, |
1148 | component_index, update_handle: fwhandle, component_size: 0); |
1149 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl); |
1150 | } |
1151 | |
1152 | static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) |
1153 | { |
1154 | struct mlxsw_core_fw_info *mlxsw_core_fw_info = |
1155 | container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev); |
1156 | struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core; |
1157 | char mcc_pl[MLXSW_REG_MCC_LEN]; |
1158 | |
1159 | mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, component_index: 0, update_handle: fwhandle, component_size: 0); |
1160 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl); |
1161 | } |
1162 | |
1163 | static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, |
1164 | enum mlxfw_fsm_state *fsm_state, |
1165 | enum mlxfw_fsm_state_err *fsm_state_err) |
1166 | { |
1167 | struct mlxsw_core_fw_info *mlxsw_core_fw_info = |
1168 | container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev); |
1169 | struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core; |
1170 | char mcc_pl[MLXSW_REG_MCC_LEN]; |
1171 | u8 control_state; |
1172 | u8 error_code; |
1173 | int err; |
1174 | |
1175 | mlxsw_reg_mcc_pack(payload: mcc_pl, instr: 0, component_index: 0, update_handle: fwhandle, component_size: 0); |
1176 | err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl); |
1177 | if (err) |
1178 | return err; |
1179 | |
1180 | mlxsw_reg_mcc_unpack(payload: mcc_pl, NULL, p_error_code: &error_code, p_control_state: &control_state); |
1181 | *fsm_state = control_state; |
1182 | *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX); |
1183 | return 0; |
1184 | } |
1185 | |
1186 | static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) |
1187 | { |
1188 | struct mlxsw_core_fw_info *mlxsw_core_fw_info = |
1189 | container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev); |
1190 | struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core; |
1191 | char mcc_pl[MLXSW_REG_MCC_LEN]; |
1192 | |
1193 | mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_CANCEL, component_index: 0, update_handle: fwhandle, component_size: 0); |
1194 | mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl); |
1195 | } |
1196 | |
1197 | static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) |
1198 | { |
1199 | struct mlxsw_core_fw_info *mlxsw_core_fw_info = |
1200 | container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev); |
1201 | struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core; |
1202 | char mcc_pl[MLXSW_REG_MCC_LEN]; |
1203 | |
1204 | mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, component_index: 0, update_handle: fwhandle, component_size: 0); |
1205 | mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl); |
1206 | } |
1207 | |
1208 | static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = { |
1209 | .component_query = mlxsw_core_fw_component_query, |
1210 | .fsm_lock = mlxsw_core_fw_fsm_lock, |
1211 | .fsm_component_update = mlxsw_core_fw_fsm_component_update, |
1212 | .fsm_block_download = mlxsw_core_fw_fsm_block_download, |
1213 | .fsm_component_verify = mlxsw_core_fw_fsm_component_verify, |
1214 | .fsm_activate = mlxsw_core_fw_fsm_activate, |
1215 | .fsm_query_state = mlxsw_core_fw_fsm_query_state, |
1216 | .fsm_cancel = mlxsw_core_fw_fsm_cancel, |
1217 | .fsm_release = mlxsw_core_fw_fsm_release, |
1218 | }; |
1219 | |
1220 | static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core, |
1221 | const struct firmware *firmware, |
1222 | struct netlink_ext_ack *extack) |
1223 | { |
1224 | struct mlxsw_core_fw_info mlxsw_core_fw_info = { |
1225 | .mlxfw_dev = { |
1226 | .ops = &mlxsw_core_fw_mlxsw_dev_ops, |
1227 | .psid = mlxsw_core->bus_info->psid, |
1228 | .psid_size = strlen(mlxsw_core->bus_info->psid), |
1229 | .devlink = priv_to_devlink(priv: mlxsw_core), |
1230 | }, |
1231 | .mlxsw_core = mlxsw_core |
1232 | }; |
1233 | |
1234 | return mlxsw_core_fw_flash(mlxsw_core, mlxfw_dev: &mlxsw_core_fw_info.mlxfw_dev, |
1235 | firmware, extack); |
1236 | } |
1237 | |
1238 | static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core, |
1239 | const struct mlxsw_bus_info *mlxsw_bus_info, |
1240 | const struct mlxsw_fw_rev *req_rev, |
1241 | const char *filename) |
1242 | { |
1243 | const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev; |
1244 | union devlink_param_value value; |
1245 | const struct firmware *firmware; |
1246 | int err; |
1247 | |
1248 | /* Don't check if driver does not require it */ |
1249 | if (!req_rev || !filename) |
1250 | return 0; |
1251 | |
1252 | /* Don't check if devlink 'fw_load_policy' param is 'flash' */ |
1253 | err = devl_param_driverinit_value_get(devlink: priv_to_devlink(priv: mlxsw_core), |
1254 | param_id: DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, |
1255 | val: &value); |
1256 | if (err) |
1257 | return err; |
1258 | if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) |
1259 | return 0; |
1260 | |
1261 | /* Validate driver & FW are compatible */ |
1262 | if (rev->major != req_rev->major) { |
1263 | WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n" , |
1264 | rev->major, req_rev->major); |
1265 | return -EINVAL; |
1266 | } |
1267 | if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) |
1268 | return 0; |
1269 | |
1270 | dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n" , |
1271 | rev->major, rev->minor, rev->subminor, req_rev->major, |
1272 | req_rev->minor, req_rev->subminor); |
1273 | dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n" , filename); |
1274 | |
1275 | err = request_firmware_direct(fw: &firmware, name: filename, device: mlxsw_bus_info->dev); |
1276 | if (err) { |
1277 | dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n" , filename); |
1278 | return err; |
1279 | } |
1280 | |
1281 | err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL); |
1282 | release_firmware(fw: firmware); |
1283 | if (err) |
1284 | dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n" ); |
1285 | |
1286 | /* On FW flash success, tell the caller FW reset is needed |
1287 | * if current FW supports it. |
1288 | */ |
1289 | if (rev->minor >= req_rev->can_reset_minor) |
1290 | return err ? err : -EAGAIN; |
1291 | else |
1292 | return 0; |
1293 | } |
1294 | |
1295 | static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core, |
1296 | struct devlink_flash_update_params *params, |
1297 | struct netlink_ext_ack *extack) |
1298 | { |
1299 | return mlxsw_core_dev_fw_flash(mlxsw_core, firmware: params->fw, extack); |
1300 | } |
1301 | |
1302 | static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, |
1303 | union devlink_param_value val, |
1304 | struct netlink_ext_ack *extack) |
1305 | { |
1306 | if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER && |
1307 | val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) { |
1308 | NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'" ); |
1309 | return -EINVAL; |
1310 | } |
1311 | |
1312 | return 0; |
1313 | } |
1314 | |
1315 | static const struct devlink_param mlxsw_core_fw_devlink_params[] = { |
1316 | DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL, |
1317 | mlxsw_core_devlink_param_fw_load_policy_validate), |
1318 | }; |
1319 | |
1320 | static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core) |
1321 | { |
1322 | struct devlink *devlink = priv_to_devlink(priv: mlxsw_core); |
1323 | union devlink_param_value value; |
1324 | int err; |
1325 | |
1326 | err = devl_params_register(devlink, params: mlxsw_core_fw_devlink_params, |
1327 | ARRAY_SIZE(mlxsw_core_fw_devlink_params)); |
1328 | if (err) |
1329 | return err; |
1330 | |
1331 | value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; |
1332 | devl_param_driverinit_value_set(devlink, |
1333 | param_id: DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, |
1334 | init_val: value); |
1335 | return 0; |
1336 | } |
1337 | |
1338 | static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core) |
1339 | { |
1340 | devl_params_unregister(devlink: priv_to_devlink(priv: mlxsw_core), params: mlxsw_core_fw_devlink_params, |
1341 | ARRAY_SIZE(mlxsw_core_fw_devlink_params)); |
1342 | } |
1343 | |
1344 | static void *__dl_port(struct devlink_port *devlink_port) |
1345 | { |
1346 | return container_of(devlink_port, struct mlxsw_core_port, devlink_port); |
1347 | } |
1348 | |
1349 | static int mlxsw_devlink_port_split(struct devlink *devlink, |
1350 | struct devlink_port *port, |
1351 | unsigned int count, |
1352 | struct netlink_ext_ack *extack) |
1353 | { |
1354 | struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port: port); |
1355 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1356 | |
1357 | if (!mlxsw_core->driver->port_split) |
1358 | return -EOPNOTSUPP; |
1359 | return mlxsw_core->driver->port_split(mlxsw_core, |
1360 | mlxsw_core_port->local_port, |
1361 | count, extack); |
1362 | } |
1363 | |
1364 | static int mlxsw_devlink_port_unsplit(struct devlink *devlink, |
1365 | struct devlink_port *port, |
1366 | struct netlink_ext_ack *extack) |
1367 | { |
1368 | struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port: port); |
1369 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1370 | |
1371 | if (!mlxsw_core->driver->port_unsplit) |
1372 | return -EOPNOTSUPP; |
1373 | return mlxsw_core->driver->port_unsplit(mlxsw_core, |
1374 | mlxsw_core_port->local_port, |
1375 | extack); |
1376 | } |
1377 | |
1378 | static int |
1379 | mlxsw_devlink_sb_pool_get(struct devlink *devlink, |
1380 | unsigned int sb_index, u16 pool_index, |
1381 | struct devlink_sb_pool_info *pool_info) |
1382 | { |
1383 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1384 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1385 | |
1386 | if (!mlxsw_driver->sb_pool_get) |
1387 | return -EOPNOTSUPP; |
1388 | return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index, |
1389 | pool_index, pool_info); |
1390 | } |
1391 | |
1392 | static int |
1393 | mlxsw_devlink_sb_pool_set(struct devlink *devlink, |
1394 | unsigned int sb_index, u16 pool_index, u32 size, |
1395 | enum devlink_sb_threshold_type threshold_type, |
1396 | struct netlink_ext_ack *extack) |
1397 | { |
1398 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1399 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1400 | |
1401 | if (!mlxsw_driver->sb_pool_set) |
1402 | return -EOPNOTSUPP; |
1403 | return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index, |
1404 | pool_index, size, threshold_type, |
1405 | extack); |
1406 | } |
1407 | |
1408 | static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port, |
1409 | unsigned int sb_index, u16 pool_index, |
1410 | u32 *p_threshold) |
1411 | { |
1412 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink); |
1413 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1414 | struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); |
1415 | |
1416 | if (!mlxsw_driver->sb_port_pool_get || |
1417 | !mlxsw_core_port_check(mlxsw_core_port)) |
1418 | return -EOPNOTSUPP; |
1419 | return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index, |
1420 | pool_index, p_threshold); |
1421 | } |
1422 | |
1423 | static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port, |
1424 | unsigned int sb_index, u16 pool_index, |
1425 | u32 threshold, |
1426 | struct netlink_ext_ack *extack) |
1427 | { |
1428 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink); |
1429 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1430 | struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); |
1431 | |
1432 | if (!mlxsw_driver->sb_port_pool_set || |
1433 | !mlxsw_core_port_check(mlxsw_core_port)) |
1434 | return -EOPNOTSUPP; |
1435 | return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index, |
1436 | pool_index, threshold, extack); |
1437 | } |
1438 | |
1439 | static int |
1440 | mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port, |
1441 | unsigned int sb_index, u16 tc_index, |
1442 | enum devlink_sb_pool_type pool_type, |
1443 | u16 *p_pool_index, u32 *p_threshold) |
1444 | { |
1445 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink); |
1446 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1447 | struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); |
1448 | |
1449 | if (!mlxsw_driver->sb_tc_pool_bind_get || |
1450 | !mlxsw_core_port_check(mlxsw_core_port)) |
1451 | return -EOPNOTSUPP; |
1452 | return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index, |
1453 | tc_index, pool_type, |
1454 | p_pool_index, p_threshold); |
1455 | } |
1456 | |
1457 | static int |
1458 | mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, |
1459 | unsigned int sb_index, u16 tc_index, |
1460 | enum devlink_sb_pool_type pool_type, |
1461 | u16 pool_index, u32 threshold, |
1462 | struct netlink_ext_ack *extack) |
1463 | { |
1464 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink); |
1465 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1466 | struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); |
1467 | |
1468 | if (!mlxsw_driver->sb_tc_pool_bind_set || |
1469 | !mlxsw_core_port_check(mlxsw_core_port)) |
1470 | return -EOPNOTSUPP; |
1471 | return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index, |
1472 | tc_index, pool_type, |
1473 | pool_index, threshold, extack); |
1474 | } |
1475 | |
1476 | static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink, |
1477 | unsigned int sb_index) |
1478 | { |
1479 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1480 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1481 | |
1482 | if (!mlxsw_driver->sb_occ_snapshot) |
1483 | return -EOPNOTSUPP; |
1484 | return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index); |
1485 | } |
1486 | |
1487 | static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink, |
1488 | unsigned int sb_index) |
1489 | { |
1490 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1491 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1492 | |
1493 | if (!mlxsw_driver->sb_occ_max_clear) |
1494 | return -EOPNOTSUPP; |
1495 | return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index); |
1496 | } |
1497 | |
1498 | static int |
1499 | mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port, |
1500 | unsigned int sb_index, u16 pool_index, |
1501 | u32 *p_cur, u32 *p_max) |
1502 | { |
1503 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink); |
1504 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1505 | struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); |
1506 | |
1507 | if (!mlxsw_driver->sb_occ_port_pool_get || |
1508 | !mlxsw_core_port_check(mlxsw_core_port)) |
1509 | return -EOPNOTSUPP; |
1510 | return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index, |
1511 | pool_index, p_cur, p_max); |
1512 | } |
1513 | |
1514 | static int |
1515 | mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, |
1516 | unsigned int sb_index, u16 tc_index, |
1517 | enum devlink_sb_pool_type pool_type, |
1518 | u32 *p_cur, u32 *p_max) |
1519 | { |
1520 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink); |
1521 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1522 | struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); |
1523 | |
1524 | if (!mlxsw_driver->sb_occ_tc_port_bind_get || |
1525 | !mlxsw_core_port_check(mlxsw_core_port)) |
1526 | return -EOPNOTSUPP; |
1527 | return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port, |
1528 | sb_index, tc_index, |
1529 | pool_type, p_cur, p_max); |
1530 | } |
1531 | |
1532 | static int |
1533 | mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, |
1534 | struct netlink_ext_ack *extack) |
1535 | { |
1536 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1537 | char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE]; |
1538 | u32 hw_rev, fw_major, fw_minor, fw_sub_minor; |
1539 | char mgir_pl[MLXSW_REG_MGIR_LEN]; |
1540 | char buf[32]; |
1541 | int err; |
1542 | |
1543 | mlxsw_reg_mgir_pack(payload: mgir_pl); |
1544 | err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), payload: mgir_pl); |
1545 | if (err) |
1546 | return err; |
1547 | mlxsw_reg_mgir_unpack(payload: mgir_pl, hw_rev: &hw_rev, fw_info_psid, fw_major: &fw_major, |
1548 | fw_minor: &fw_minor, fw_sub_minor: &fw_sub_minor); |
1549 | |
1550 | sprintf(buf, fmt: "%X" , hw_rev); |
1551 | err = devlink_info_version_fixed_put(req, version_name: "hw.revision" , version_value: buf); |
1552 | if (err) |
1553 | return err; |
1554 | |
1555 | err = devlink_info_version_fixed_put(req, |
1556 | DEVLINK_INFO_VERSION_GENERIC_FW_PSID, |
1557 | version_value: fw_info_psid); |
1558 | if (err) |
1559 | return err; |
1560 | |
1561 | sprintf(buf, fmt: "%d.%d.%d" , fw_major, fw_minor, fw_sub_minor); |
1562 | err = devlink_info_version_running_put(req, version_name: "fw.version" , version_value: buf); |
1563 | if (err) |
1564 | return err; |
1565 | |
1566 | return devlink_info_version_running_put(req, |
1567 | DEVLINK_INFO_VERSION_GENERIC_FW, |
1568 | version_value: buf); |
1569 | } |
1570 | |
1571 | static int |
1572 | mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink, |
1573 | bool netns_change, enum devlink_reload_action action, |
1574 | enum devlink_reload_limit limit, |
1575 | struct netlink_ext_ack *extack) |
1576 | { |
1577 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1578 | |
1579 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET)) |
1580 | return -EOPNOTSUPP; |
1581 | |
1582 | mlxsw_core_bus_device_unregister(mlxsw_core, reload: true); |
1583 | return 0; |
1584 | } |
1585 | |
1586 | static int |
1587 | mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action, |
1588 | enum devlink_reload_limit limit, u32 *actions_performed, |
1589 | struct netlink_ext_ack *extack) |
1590 | { |
1591 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1592 | int err; |
1593 | |
1594 | *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | |
1595 | BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); |
1596 | err = mlxsw_core_bus_device_register(mlxsw_bus_info: mlxsw_core->bus_info, |
1597 | mlxsw_bus: mlxsw_core->bus, |
1598 | bus_priv: mlxsw_core->bus_priv, reload: true, |
1599 | devlink, extack); |
1600 | return err; |
1601 | } |
1602 | |
1603 | static int mlxsw_devlink_flash_update(struct devlink *devlink, |
1604 | struct devlink_flash_update_params *params, |
1605 | struct netlink_ext_ack *extack) |
1606 | { |
1607 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1608 | |
1609 | return mlxsw_core_fw_flash_update(mlxsw_core, params, extack); |
1610 | } |
1611 | |
1612 | static int mlxsw_devlink_trap_init(struct devlink *devlink, |
1613 | const struct devlink_trap *trap, |
1614 | void *trap_ctx) |
1615 | { |
1616 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1617 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1618 | |
1619 | if (!mlxsw_driver->trap_init) |
1620 | return -EOPNOTSUPP; |
1621 | return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx); |
1622 | } |
1623 | |
1624 | static void mlxsw_devlink_trap_fini(struct devlink *devlink, |
1625 | const struct devlink_trap *trap, |
1626 | void *trap_ctx) |
1627 | { |
1628 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1629 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1630 | |
1631 | if (!mlxsw_driver->trap_fini) |
1632 | return; |
1633 | mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx); |
1634 | } |
1635 | |
1636 | static int mlxsw_devlink_trap_action_set(struct devlink *devlink, |
1637 | const struct devlink_trap *trap, |
1638 | enum devlink_trap_action action, |
1639 | struct netlink_ext_ack *extack) |
1640 | { |
1641 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1642 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1643 | |
1644 | if (!mlxsw_driver->trap_action_set) |
1645 | return -EOPNOTSUPP; |
1646 | return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack); |
1647 | } |
1648 | |
1649 | static int |
1650 | mlxsw_devlink_trap_group_init(struct devlink *devlink, |
1651 | const struct devlink_trap_group *group) |
1652 | { |
1653 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1654 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1655 | |
1656 | if (!mlxsw_driver->trap_group_init) |
1657 | return -EOPNOTSUPP; |
1658 | return mlxsw_driver->trap_group_init(mlxsw_core, group); |
1659 | } |
1660 | |
1661 | static int |
1662 | mlxsw_devlink_trap_group_set(struct devlink *devlink, |
1663 | const struct devlink_trap_group *group, |
1664 | const struct devlink_trap_policer *policer, |
1665 | struct netlink_ext_ack *extack) |
1666 | { |
1667 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1668 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1669 | |
1670 | if (!mlxsw_driver->trap_group_set) |
1671 | return -EOPNOTSUPP; |
1672 | return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack); |
1673 | } |
1674 | |
1675 | static int |
1676 | mlxsw_devlink_trap_policer_init(struct devlink *devlink, |
1677 | const struct devlink_trap_policer *policer) |
1678 | { |
1679 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1680 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1681 | |
1682 | if (!mlxsw_driver->trap_policer_init) |
1683 | return -EOPNOTSUPP; |
1684 | return mlxsw_driver->trap_policer_init(mlxsw_core, policer); |
1685 | } |
1686 | |
1687 | static void |
1688 | mlxsw_devlink_trap_policer_fini(struct devlink *devlink, |
1689 | const struct devlink_trap_policer *policer) |
1690 | { |
1691 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1692 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1693 | |
1694 | if (!mlxsw_driver->trap_policer_fini) |
1695 | return; |
1696 | mlxsw_driver->trap_policer_fini(mlxsw_core, policer); |
1697 | } |
1698 | |
1699 | static int |
1700 | mlxsw_devlink_trap_policer_set(struct devlink *devlink, |
1701 | const struct devlink_trap_policer *policer, |
1702 | u64 rate, u64 burst, |
1703 | struct netlink_ext_ack *extack) |
1704 | { |
1705 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1706 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1707 | |
1708 | if (!mlxsw_driver->trap_policer_set) |
1709 | return -EOPNOTSUPP; |
1710 | return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst, |
1711 | extack); |
1712 | } |
1713 | |
1714 | static int |
1715 | mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink, |
1716 | const struct devlink_trap_policer *policer, |
1717 | u64 *p_drops) |
1718 | { |
1719 | struct mlxsw_core *mlxsw_core = devlink_priv(devlink); |
1720 | struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; |
1721 | |
1722 | if (!mlxsw_driver->trap_policer_counter_get) |
1723 | return -EOPNOTSUPP; |
1724 | return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer, |
1725 | p_drops); |
1726 | } |
1727 | |
1728 | static const struct devlink_ops mlxsw_devlink_ops = { |
1729 | .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | |
1730 | BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), |
1731 | .reload_down = mlxsw_devlink_core_bus_device_reload_down, |
1732 | .reload_up = mlxsw_devlink_core_bus_device_reload_up, |
1733 | .sb_pool_get = mlxsw_devlink_sb_pool_get, |
1734 | .sb_pool_set = mlxsw_devlink_sb_pool_set, |
1735 | .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get, |
1736 | .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set, |
1737 | .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get, |
1738 | .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set, |
1739 | .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot, |
1740 | .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear, |
1741 | .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get, |
1742 | .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get, |
1743 | .info_get = mlxsw_devlink_info_get, |
1744 | .flash_update = mlxsw_devlink_flash_update, |
1745 | .trap_init = mlxsw_devlink_trap_init, |
1746 | .trap_fini = mlxsw_devlink_trap_fini, |
1747 | .trap_action_set = mlxsw_devlink_trap_action_set, |
1748 | .trap_group_init = mlxsw_devlink_trap_group_init, |
1749 | .trap_group_set = mlxsw_devlink_trap_group_set, |
1750 | .trap_policer_init = mlxsw_devlink_trap_policer_init, |
1751 | .trap_policer_fini = mlxsw_devlink_trap_policer_fini, |
1752 | .trap_policer_set = mlxsw_devlink_trap_policer_set, |
1753 | .trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get, |
1754 | }; |
1755 | |
1756 | static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core) |
1757 | { |
1758 | return mlxsw_core_fw_params_register(mlxsw_core); |
1759 | } |
1760 | |
1761 | static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core) |
1762 | { |
1763 | mlxsw_core_fw_params_unregister(mlxsw_core); |
1764 | } |
1765 | |
1766 | struct mlxsw_core_health_event { |
1767 | struct mlxsw_core *mlxsw_core; |
1768 | char mfde_pl[MLXSW_REG_MFDE_LEN]; |
1769 | struct work_struct work; |
1770 | }; |
1771 | |
1772 | static void mlxsw_core_health_event_work(struct work_struct *work) |
1773 | { |
1774 | struct mlxsw_core_health_event *event; |
1775 | struct mlxsw_core *mlxsw_core; |
1776 | |
1777 | event = container_of(work, struct mlxsw_core_health_event, work); |
1778 | mlxsw_core = event->mlxsw_core; |
1779 | devlink_health_report(reporter: mlxsw_core->health.fw_fatal, msg: "FW fatal event occurred" , |
1780 | priv_ctx: event->mfde_pl); |
1781 | kfree(objp: event); |
1782 | } |
1783 | |
1784 | static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg, |
1785 | char *mfde_pl, void *priv) |
1786 | { |
1787 | struct mlxsw_core_health_event *event; |
1788 | struct mlxsw_core *mlxsw_core = priv; |
1789 | |
1790 | event = kmalloc(size: sizeof(*event), GFP_ATOMIC); |
1791 | if (!event) |
1792 | return; |
1793 | event->mlxsw_core = mlxsw_core; |
1794 | memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl)); |
1795 | INIT_WORK(&event->work, mlxsw_core_health_event_work); |
1796 | mlxsw_core_schedule_work(work: &event->work); |
1797 | } |
1798 | |
1799 | static const struct mlxsw_listener mlxsw_core_health_listener = |
1800 | MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE); |
1801 | |
1802 | static void |
1803 | mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl, |
1804 | struct devlink_fmsg *fmsg) |
1805 | { |
1806 | u32 val, tile_v; |
1807 | |
1808 | val = mlxsw_reg_mfde_fatal_cause_id_get(buf: mfde_pl); |
1809 | devlink_fmsg_u32_pair_put(fmsg, name: "cause_id" , value: val); |
1810 | tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(buf: mfde_pl); |
1811 | if (tile_v) { |
1812 | val = mlxsw_reg_mfde_fatal_cause_tile_index_get(buf: mfde_pl); |
1813 | devlink_fmsg_u8_pair_put(fmsg, name: "tile_index" , value: val); |
1814 | } |
1815 | } |
1816 | |
1817 | static void |
1818 | mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl, |
1819 | struct devlink_fmsg *fmsg) |
1820 | { |
1821 | u32 val, tile_v; |
1822 | |
1823 | val = mlxsw_reg_mfde_fw_assert_var0_get(buf: mfde_pl); |
1824 | devlink_fmsg_u32_pair_put(fmsg, name: "var0" , value: val); |
1825 | val = mlxsw_reg_mfde_fw_assert_var1_get(buf: mfde_pl); |
1826 | devlink_fmsg_u32_pair_put(fmsg, name: "var1" , value: val); |
1827 | val = mlxsw_reg_mfde_fw_assert_var2_get(buf: mfde_pl); |
1828 | devlink_fmsg_u32_pair_put(fmsg, name: "var2" , value: val); |
1829 | val = mlxsw_reg_mfde_fw_assert_var3_get(buf: mfde_pl); |
1830 | devlink_fmsg_u32_pair_put(fmsg, name: "var3" , value: val); |
1831 | val = mlxsw_reg_mfde_fw_assert_var4_get(buf: mfde_pl); |
1832 | devlink_fmsg_u32_pair_put(fmsg, name: "var4" , value: val); |
1833 | val = mlxsw_reg_mfde_fw_assert_existptr_get(buf: mfde_pl); |
1834 | devlink_fmsg_u32_pair_put(fmsg, name: "existptr" , value: val); |
1835 | val = mlxsw_reg_mfde_fw_assert_callra_get(buf: mfde_pl); |
1836 | devlink_fmsg_u32_pair_put(fmsg, name: "callra" , value: val); |
1837 | val = mlxsw_reg_mfde_fw_assert_oe_get(buf: mfde_pl); |
1838 | devlink_fmsg_bool_pair_put(fmsg, name: "old_event" , value: val); |
1839 | tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(buf: mfde_pl); |
1840 | if (tile_v) { |
1841 | val = mlxsw_reg_mfde_fw_assert_tile_index_get(buf: mfde_pl); |
1842 | devlink_fmsg_u8_pair_put(fmsg, name: "tile_index" , value: val); |
1843 | } |
1844 | val = mlxsw_reg_mfde_fw_assert_ext_synd_get(buf: mfde_pl); |
1845 | devlink_fmsg_u32_pair_put(fmsg, name: "ext_synd" , value: val); |
1846 | } |
1847 | |
1848 | static void |
1849 | mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl, |
1850 | struct devlink_fmsg *fmsg) |
1851 | { |
1852 | u32 val; |
1853 | |
1854 | val = mlxsw_reg_mfde_kvd_im_stop_oe_get(buf: mfde_pl); |
1855 | devlink_fmsg_bool_pair_put(fmsg, name: "old_event" , value: val); |
1856 | val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(buf: mfde_pl); |
1857 | devlink_fmsg_u32_pair_put(fmsg, name: "pipes_mask" , value: val); |
1858 | } |
1859 | |
1860 | static void |
1861 | mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl, |
1862 | struct devlink_fmsg *fmsg) |
1863 | { |
1864 | u32 val; |
1865 | |
1866 | val = mlxsw_reg_mfde_crspace_to_log_address_get(buf: mfde_pl); |
1867 | devlink_fmsg_u32_pair_put(fmsg, name: "log_address" , value: val); |
1868 | val = mlxsw_reg_mfde_crspace_to_oe_get(buf: mfde_pl); |
1869 | devlink_fmsg_bool_pair_put(fmsg, name: "old_event" , value: val); |
1870 | val = mlxsw_reg_mfde_crspace_to_log_id_get(buf: mfde_pl); |
1871 | devlink_fmsg_u8_pair_put(fmsg, name: "log_irisc_id" , value: val); |
1872 | val = mlxsw_reg_mfde_crspace_to_log_ip_get(buf: mfde_pl); |
1873 | devlink_fmsg_u64_pair_put(fmsg, name: "log_ip" , value: val); |
1874 | } |
1875 | |
1876 | static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter, |
1877 | struct devlink_fmsg *fmsg, void *priv_ctx, |
1878 | struct netlink_ext_ack *extack) |
1879 | { |
1880 | char *mfde_pl = priv_ctx; |
1881 | char *val_str; |
1882 | u8 event_id; |
1883 | u32 val; |
1884 | |
1885 | if (!priv_ctx) |
1886 | /* User-triggered dumps are not possible */ |
1887 | return -EOPNOTSUPP; |
1888 | |
1889 | val = mlxsw_reg_mfde_irisc_id_get(buf: mfde_pl); |
1890 | devlink_fmsg_u8_pair_put(fmsg, name: "irisc_id" , value: val); |
1891 | |
1892 | devlink_fmsg_arr_pair_nest_start(fmsg, name: "event" ); |
1893 | event_id = mlxsw_reg_mfde_event_id_get(buf: mfde_pl); |
1894 | devlink_fmsg_u32_pair_put(fmsg, name: "id" , value: event_id); |
1895 | switch (event_id) { |
1896 | case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: |
1897 | val_str = "CR space timeout" ; |
1898 | break; |
1899 | case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: |
1900 | val_str = "KVD insertion machine stopped" ; |
1901 | break; |
1902 | case MLXSW_REG_MFDE_EVENT_ID_TEST: |
1903 | val_str = "Test" ; |
1904 | break; |
1905 | case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: |
1906 | val_str = "FW assert" ; |
1907 | break; |
1908 | case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: |
1909 | val_str = "Fatal cause" ; |
1910 | break; |
1911 | default: |
1912 | val_str = NULL; |
1913 | } |
1914 | if (val_str) |
1915 | devlink_fmsg_string_pair_put(fmsg, name: "desc" , value: val_str); |
1916 | devlink_fmsg_arr_pair_nest_end(fmsg); |
1917 | |
1918 | devlink_fmsg_arr_pair_nest_start(fmsg, name: "severity" ); |
1919 | val = mlxsw_reg_mfde_severity_get(buf: mfde_pl); |
1920 | devlink_fmsg_u8_pair_put(fmsg, name: "id" , value: val); |
1921 | switch (val) { |
1922 | case MLXSW_REG_MFDE_SEVERITY_FATL: |
1923 | val_str = "Fatal" ; |
1924 | break; |
1925 | case MLXSW_REG_MFDE_SEVERITY_NRML: |
1926 | val_str = "Normal" ; |
1927 | break; |
1928 | case MLXSW_REG_MFDE_SEVERITY_INTR: |
1929 | val_str = "Debug" ; |
1930 | break; |
1931 | default: |
1932 | val_str = NULL; |
1933 | } |
1934 | if (val_str) |
1935 | devlink_fmsg_string_pair_put(fmsg, name: "desc" , value: val_str); |
1936 | devlink_fmsg_arr_pair_nest_end(fmsg); |
1937 | |
1938 | val = mlxsw_reg_mfde_method_get(buf: mfde_pl); |
1939 | switch (val) { |
1940 | case MLXSW_REG_MFDE_METHOD_QUERY: |
1941 | val_str = "query" ; |
1942 | break; |
1943 | case MLXSW_REG_MFDE_METHOD_WRITE: |
1944 | val_str = "write" ; |
1945 | break; |
1946 | default: |
1947 | val_str = NULL; |
1948 | } |
1949 | if (val_str) |
1950 | devlink_fmsg_string_pair_put(fmsg, name: "method" , value: val_str); |
1951 | |
1952 | val = mlxsw_reg_mfde_long_process_get(buf: mfde_pl); |
1953 | devlink_fmsg_bool_pair_put(fmsg, name: "long_process" , value: val); |
1954 | |
1955 | val = mlxsw_reg_mfde_command_type_get(buf: mfde_pl); |
1956 | switch (val) { |
1957 | case MLXSW_REG_MFDE_COMMAND_TYPE_MAD: |
1958 | val_str = "mad" ; |
1959 | break; |
1960 | case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD: |
1961 | val_str = "emad" ; |
1962 | break; |
1963 | case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF: |
1964 | val_str = "cmdif" ; |
1965 | break; |
1966 | default: |
1967 | val_str = NULL; |
1968 | } |
1969 | if (val_str) |
1970 | devlink_fmsg_string_pair_put(fmsg, name: "command_type" , value: val_str); |
1971 | |
1972 | val = mlxsw_reg_mfde_reg_attr_id_get(buf: mfde_pl); |
1973 | devlink_fmsg_u32_pair_put(fmsg, name: "reg_attr_id" , value: val); |
1974 | |
1975 | switch (event_id) { |
1976 | case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: |
1977 | mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl, fmsg); |
1978 | break; |
1979 | case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: |
1980 | mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl, fmsg); |
1981 | break; |
1982 | case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: |
1983 | mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg); |
1984 | break; |
1985 | case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: |
1986 | mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl, fmsg); |
1987 | break; |
1988 | } |
1989 | |
1990 | return 0; |
1991 | } |
1992 | |
1993 | static int |
1994 | mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter, |
1995 | struct netlink_ext_ack *extack) |
1996 | { |
1997 | struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter); |
1998 | char mfgd_pl[MLXSW_REG_MFGD_LEN]; |
1999 | int err; |
2000 | |
2001 | /* Read the register first to make sure no other bits are changed. */ |
2002 | err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), payload: mfgd_pl); |
2003 | if (err) |
2004 | return err; |
2005 | mlxsw_reg_mfgd_trigger_test_set(buf: mfgd_pl, val: true); |
2006 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), payload: mfgd_pl); |
2007 | } |
2008 | |
2009 | static const struct devlink_health_reporter_ops |
2010 | mlxsw_core_health_fw_fatal_ops = { |
2011 | .name = "fw_fatal" , |
2012 | .dump = mlxsw_core_health_fw_fatal_dump, |
2013 | .test = mlxsw_core_health_fw_fatal_test, |
2014 | }; |
2015 | |
2016 | static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core, |
2017 | bool enable) |
2018 | { |
2019 | char mfgd_pl[MLXSW_REG_MFGD_LEN]; |
2020 | int err; |
2021 | |
2022 | /* Read the register first to make sure no other bits are changed. */ |
2023 | err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), payload: mfgd_pl); |
2024 | if (err) |
2025 | return err; |
2026 | mlxsw_reg_mfgd_fatal_event_mode_set(buf: mfgd_pl, val: enable); |
2027 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), payload: mfgd_pl); |
2028 | } |
2029 | |
2030 | static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core) |
2031 | { |
2032 | struct devlink *devlink = priv_to_devlink(priv: mlxsw_core); |
2033 | struct devlink_health_reporter *fw_fatal; |
2034 | int err; |
2035 | |
2036 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
2037 | return 0; |
2038 | |
2039 | fw_fatal = devl_health_reporter_create(devlink, ops: &mlxsw_core_health_fw_fatal_ops, |
2040 | graceful_period: 0, priv: mlxsw_core); |
2041 | if (IS_ERR(ptr: fw_fatal)) { |
2042 | dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter" ); |
2043 | return PTR_ERR(ptr: fw_fatal); |
2044 | } |
2045 | mlxsw_core->health.fw_fatal = fw_fatal; |
2046 | |
2047 | err = mlxsw_core_trap_register(mlxsw_core, listener: &mlxsw_core_health_listener, priv: mlxsw_core); |
2048 | if (err) |
2049 | goto err_trap_register; |
2050 | |
2051 | err = mlxsw_core_health_fw_fatal_config(mlxsw_core, enable: true); |
2052 | if (err) |
2053 | goto err_fw_fatal_config; |
2054 | |
2055 | return 0; |
2056 | |
2057 | err_fw_fatal_config: |
2058 | mlxsw_core_trap_unregister(mlxsw_core, listener: &mlxsw_core_health_listener, priv: mlxsw_core); |
2059 | err_trap_register: |
2060 | devl_health_reporter_destroy(reporter: mlxsw_core->health.fw_fatal); |
2061 | return err; |
2062 | } |
2063 | |
2064 | static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core) |
2065 | { |
2066 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
2067 | return; |
2068 | |
2069 | mlxsw_core_health_fw_fatal_config(mlxsw_core, enable: false); |
2070 | mlxsw_core_trap_unregister(mlxsw_core, listener: &mlxsw_core_health_listener, priv: mlxsw_core); |
2071 | /* Make sure there is no more event work scheduled */ |
2072 | mlxsw_core_flush_owq(); |
2073 | devl_health_reporter_destroy(reporter: mlxsw_core->health.fw_fatal); |
2074 | } |
2075 | |
2076 | static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core) |
2077 | { |
2078 | INIT_LIST_HEAD(list: &mlxsw_core->irq_event_handler_list); |
2079 | mutex_init(&mlxsw_core->irq_event_handler_lock); |
2080 | } |
2081 | |
2082 | static void mlxsw_core_irq_event_handler_fini(struct mlxsw_core *mlxsw_core) |
2083 | { |
2084 | mutex_destroy(lock: &mlxsw_core->irq_event_handler_lock); |
2085 | WARN_ON(!list_empty(&mlxsw_core->irq_event_handler_list)); |
2086 | } |
2087 | |
2088 | static int |
2089 | __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, |
2090 | const struct mlxsw_bus *mlxsw_bus, |
2091 | void *bus_priv, bool reload, |
2092 | struct devlink *devlink, |
2093 | struct netlink_ext_ack *extack) |
2094 | { |
2095 | const char *device_kind = mlxsw_bus_info->device_kind; |
2096 | struct mlxsw_core *mlxsw_core; |
2097 | struct mlxsw_driver *mlxsw_driver; |
2098 | size_t alloc_size; |
2099 | u16 max_lag; |
2100 | int err; |
2101 | |
2102 | mlxsw_driver = mlxsw_core_driver_get(kind: device_kind); |
2103 | if (!mlxsw_driver) |
2104 | return -EINVAL; |
2105 | |
2106 | if (!reload) { |
2107 | alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; |
2108 | devlink = devlink_alloc(ops: &mlxsw_devlink_ops, priv_size: alloc_size, |
2109 | dev: mlxsw_bus_info->dev); |
2110 | if (!devlink) { |
2111 | err = -ENOMEM; |
2112 | goto err_devlink_alloc; |
2113 | } |
2114 | devl_lock(devlink); |
2115 | devl_register(devlink); |
2116 | } |
2117 | |
2118 | mlxsw_core = devlink_priv(devlink); |
2119 | INIT_LIST_HEAD(list: &mlxsw_core->rx_listener_list); |
2120 | INIT_LIST_HEAD(list: &mlxsw_core->event_listener_list); |
2121 | mlxsw_core->driver = mlxsw_driver; |
2122 | mlxsw_core->bus = mlxsw_bus; |
2123 | mlxsw_core->bus_priv = bus_priv; |
2124 | mlxsw_core->bus_info = mlxsw_bus_info; |
2125 | mlxsw_core_irq_event_handler_init(mlxsw_core); |
2126 | |
2127 | err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, |
2128 | &mlxsw_core->res); |
2129 | if (err) |
2130 | goto err_bus_init; |
2131 | |
2132 | if (mlxsw_driver->resources_register && !reload) { |
2133 | err = mlxsw_driver->resources_register(mlxsw_core); |
2134 | if (err) |
2135 | goto err_register_resources; |
2136 | } |
2137 | |
2138 | err = mlxsw_ports_init(mlxsw_core, reload); |
2139 | if (err) |
2140 | goto err_ports_init; |
2141 | |
2142 | err = mlxsw_core_max_lag(mlxsw_core, &max_lag); |
2143 | if (!err && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) { |
2144 | alloc_size = sizeof(*mlxsw_core->lag.mapping) * max_lag * |
2145 | MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); |
2146 | mlxsw_core->lag.mapping = kzalloc(size: alloc_size, GFP_KERNEL); |
2147 | if (!mlxsw_core->lag.mapping) { |
2148 | err = -ENOMEM; |
2149 | goto err_alloc_lag_mapping; |
2150 | } |
2151 | } |
2152 | |
2153 | err = mlxsw_core_trap_groups_set(mlxsw_core); |
2154 | if (err) |
2155 | goto err_trap_groups_set; |
2156 | |
2157 | err = mlxsw_emad_init(mlxsw_core); |
2158 | if (err) |
2159 | goto err_emad_init; |
2160 | |
2161 | if (!reload) { |
2162 | err = mlxsw_core_params_register(mlxsw_core); |
2163 | if (err) |
2164 | goto err_register_params; |
2165 | } |
2166 | |
2167 | err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, req_rev: mlxsw_driver->fw_req_rev, |
2168 | filename: mlxsw_driver->fw_filename); |
2169 | if (err) |
2170 | goto err_fw_rev_validate; |
2171 | |
2172 | err = mlxsw_linecards_init(mlxsw_core, bus_info: mlxsw_bus_info); |
2173 | if (err) |
2174 | goto err_linecards_init; |
2175 | |
2176 | err = mlxsw_core_health_init(mlxsw_core); |
2177 | if (err) |
2178 | goto err_health_init; |
2179 | |
2180 | err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, p_hwmon: &mlxsw_core->hwmon); |
2181 | if (err) |
2182 | goto err_hwmon_init; |
2183 | |
2184 | err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info, |
2185 | p_thermal: &mlxsw_core->thermal); |
2186 | if (err) |
2187 | goto err_thermal_init; |
2188 | |
2189 | err = mlxsw_env_init(core: mlxsw_core, bus_info: mlxsw_bus_info, p_env: &mlxsw_core->env); |
2190 | if (err) |
2191 | goto err_env_init; |
2192 | |
2193 | if (mlxsw_driver->init) { |
2194 | err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack); |
2195 | if (err) |
2196 | goto err_driver_init; |
2197 | } |
2198 | |
2199 | if (!reload) |
2200 | devl_unlock(devlink); |
2201 | return 0; |
2202 | |
2203 | err_driver_init: |
2204 | mlxsw_env_fini(env: mlxsw_core->env); |
2205 | err_env_init: |
2206 | mlxsw_thermal_fini(thermal: mlxsw_core->thermal); |
2207 | err_thermal_init: |
2208 | mlxsw_hwmon_fini(mlxsw_hwmon: mlxsw_core->hwmon); |
2209 | err_hwmon_init: |
2210 | mlxsw_core_health_fini(mlxsw_core); |
2211 | err_health_init: |
2212 | mlxsw_linecards_fini(mlxsw_core); |
2213 | err_linecards_init: |
2214 | err_fw_rev_validate: |
2215 | if (!reload) |
2216 | mlxsw_core_params_unregister(mlxsw_core); |
2217 | err_register_params: |
2218 | mlxsw_emad_fini(mlxsw_core); |
2219 | err_emad_init: |
2220 | err_trap_groups_set: |
2221 | kfree(objp: mlxsw_core->lag.mapping); |
2222 | err_alloc_lag_mapping: |
2223 | mlxsw_ports_fini(mlxsw_core, reload); |
2224 | err_ports_init: |
2225 | if (!reload) |
2226 | devl_resources_unregister(devlink); |
2227 | err_register_resources: |
2228 | mlxsw_bus->fini(bus_priv); |
2229 | err_bus_init: |
2230 | mlxsw_core_irq_event_handler_fini(mlxsw_core); |
2231 | if (!reload) { |
2232 | devl_unregister(devlink); |
2233 | devl_unlock(devlink); |
2234 | devlink_free(devlink); |
2235 | } |
2236 | err_devlink_alloc: |
2237 | return err; |
2238 | } |
2239 | |
2240 | int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, |
2241 | const struct mlxsw_bus *mlxsw_bus, |
2242 | void *bus_priv, bool reload, |
2243 | struct devlink *devlink, |
2244 | struct netlink_ext_ack *extack) |
2245 | { |
2246 | bool called_again = false; |
2247 | int err; |
2248 | |
2249 | again: |
2250 | err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus, |
2251 | bus_priv, reload, |
2252 | devlink, extack); |
2253 | /* -EAGAIN is returned in case the FW was updated. FW needs |
2254 | * a reset, so lets try to call __mlxsw_core_bus_device_register() |
2255 | * again. |
2256 | */ |
2257 | if (err == -EAGAIN && !called_again) { |
2258 | called_again = true; |
2259 | goto again; |
2260 | } |
2261 | |
2262 | return err; |
2263 | } |
2264 | EXPORT_SYMBOL(mlxsw_core_bus_device_register); |
2265 | |
2266 | void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, |
2267 | bool reload) |
2268 | { |
2269 | struct devlink *devlink = priv_to_devlink(priv: mlxsw_core); |
2270 | |
2271 | if (!reload) |
2272 | devl_lock(devlink); |
2273 | |
2274 | if (devlink_is_reload_failed(devlink)) { |
2275 | if (!reload) |
2276 | /* Only the parts that were not de-initialized in the |
2277 | * failed reload attempt need to be de-initialized. |
2278 | */ |
2279 | goto reload_fail_deinit; |
2280 | else |
2281 | return; |
2282 | } |
2283 | |
2284 | if (mlxsw_core->driver->fini) |
2285 | mlxsw_core->driver->fini(mlxsw_core); |
2286 | mlxsw_env_fini(env: mlxsw_core->env); |
2287 | mlxsw_thermal_fini(thermal: mlxsw_core->thermal); |
2288 | mlxsw_hwmon_fini(mlxsw_hwmon: mlxsw_core->hwmon); |
2289 | mlxsw_core_health_fini(mlxsw_core); |
2290 | mlxsw_linecards_fini(mlxsw_core); |
2291 | if (!reload) |
2292 | mlxsw_core_params_unregister(mlxsw_core); |
2293 | mlxsw_emad_fini(mlxsw_core); |
2294 | kfree(objp: mlxsw_core->lag.mapping); |
2295 | mlxsw_ports_fini(mlxsw_core, reload); |
2296 | if (!reload) |
2297 | devl_resources_unregister(devlink); |
2298 | mlxsw_core->bus->fini(mlxsw_core->bus_priv); |
2299 | mlxsw_core_irq_event_handler_fini(mlxsw_core); |
2300 | if (!reload) { |
2301 | devl_unregister(devlink); |
2302 | devl_unlock(devlink); |
2303 | devlink_free(devlink); |
2304 | } |
2305 | |
2306 | return; |
2307 | |
2308 | reload_fail_deinit: |
2309 | mlxsw_core_params_unregister(mlxsw_core); |
2310 | devl_resources_unregister(devlink); |
2311 | devl_unregister(devlink); |
2312 | devl_unlock(devlink); |
2313 | devlink_free(devlink); |
2314 | } |
2315 | EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); |
2316 | |
2317 | bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, |
2318 | const struct mlxsw_tx_info *tx_info) |
2319 | { |
2320 | return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv, |
2321 | tx_info); |
2322 | } |
2323 | EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy); |
2324 | |
2325 | int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, |
2326 | const struct mlxsw_tx_info *tx_info) |
2327 | { |
2328 | return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb, |
2329 | tx_info); |
2330 | } |
2331 | EXPORT_SYMBOL(mlxsw_core_skb_transmit); |
2332 | |
2333 | void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core, |
2334 | struct sk_buff *skb, u16 local_port) |
2335 | { |
2336 | if (mlxsw_core->driver->ptp_transmitted) |
2337 | mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb, |
2338 | local_port); |
2339 | } |
2340 | EXPORT_SYMBOL(mlxsw_core_ptp_transmitted); |
2341 | |
2342 | static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a, |
2343 | const struct mlxsw_rx_listener *rxl_b) |
2344 | { |
2345 | return (rxl_a->func == rxl_b->func && |
2346 | rxl_a->local_port == rxl_b->local_port && |
2347 | rxl_a->trap_id == rxl_b->trap_id && |
2348 | rxl_a->mirror_reason == rxl_b->mirror_reason); |
2349 | } |
2350 | |
2351 | static struct mlxsw_rx_listener_item * |
2352 | __find_rx_listener_item(struct mlxsw_core *mlxsw_core, |
2353 | const struct mlxsw_rx_listener *rxl) |
2354 | { |
2355 | struct mlxsw_rx_listener_item *rxl_item; |
2356 | |
2357 | list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) { |
2358 | if (__is_rx_listener_equal(rxl_a: &rxl_item->rxl, rxl_b: rxl)) |
2359 | return rxl_item; |
2360 | } |
2361 | return NULL; |
2362 | } |
2363 | |
2364 | int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core, |
2365 | const struct mlxsw_rx_listener *rxl, |
2366 | void *priv, bool enabled) |
2367 | { |
2368 | struct mlxsw_rx_listener_item *rxl_item; |
2369 | |
2370 | rxl_item = __find_rx_listener_item(mlxsw_core, rxl); |
2371 | if (rxl_item) |
2372 | return -EEXIST; |
2373 | rxl_item = kmalloc(size: sizeof(*rxl_item), GFP_KERNEL); |
2374 | if (!rxl_item) |
2375 | return -ENOMEM; |
2376 | rxl_item->rxl = *rxl; |
2377 | rxl_item->priv = priv; |
2378 | rxl_item->enabled = enabled; |
2379 | |
2380 | list_add_rcu(new: &rxl_item->list, head: &mlxsw_core->rx_listener_list); |
2381 | return 0; |
2382 | } |
2383 | EXPORT_SYMBOL(mlxsw_core_rx_listener_register); |
2384 | |
2385 | void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core, |
2386 | const struct mlxsw_rx_listener *rxl) |
2387 | { |
2388 | struct mlxsw_rx_listener_item *rxl_item; |
2389 | |
2390 | rxl_item = __find_rx_listener_item(mlxsw_core, rxl); |
2391 | if (!rxl_item) |
2392 | return; |
2393 | list_del_rcu(entry: &rxl_item->list); |
2394 | synchronize_rcu(); |
2395 | kfree(objp: rxl_item); |
2396 | } |
2397 | EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister); |
2398 | |
2399 | static void |
2400 | mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core, |
2401 | const struct mlxsw_rx_listener *rxl, |
2402 | bool enabled) |
2403 | { |
2404 | struct mlxsw_rx_listener_item *rxl_item; |
2405 | |
2406 | rxl_item = __find_rx_listener_item(mlxsw_core, rxl); |
2407 | if (WARN_ON(!rxl_item)) |
2408 | return; |
2409 | rxl_item->enabled = enabled; |
2410 | } |
2411 | |
2412 | static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port, |
2413 | void *priv) |
2414 | { |
2415 | struct mlxsw_event_listener_item *event_listener_item = priv; |
2416 | struct mlxsw_core *mlxsw_core; |
2417 | struct mlxsw_reg_info reg; |
2418 | char *payload; |
2419 | char *reg_tlv; |
2420 | char *op_tlv; |
2421 | |
2422 | mlxsw_core = event_listener_item->mlxsw_core; |
2423 | trace_devlink_hwmsg(devlink: priv_to_devlink(priv: mlxsw_core), incoming: true, type: 0, |
2424 | buf: skb->data, len: skb->len); |
2425 | |
2426 | mlxsw_emad_tlv_parse(skb); |
2427 | op_tlv = mlxsw_emad_op_tlv(skb); |
2428 | reg_tlv = mlxsw_emad_reg_tlv(skb); |
2429 | |
2430 | reg.id = mlxsw_emad_op_tlv_register_id_get(buf: op_tlv); |
2431 | reg.len = (mlxsw_emad_reg_tlv_len_get(buf: reg_tlv) - 1) * sizeof(u32); |
2432 | payload = mlxsw_emad_reg_payload(reg_tlv); |
2433 | event_listener_item->el.func(®, payload, event_listener_item->priv); |
2434 | dev_kfree_skb(skb); |
2435 | } |
2436 | |
2437 | static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a, |
2438 | const struct mlxsw_event_listener *el_b) |
2439 | { |
2440 | return (el_a->func == el_b->func && |
2441 | el_a->trap_id == el_b->trap_id); |
2442 | } |
2443 | |
2444 | static struct mlxsw_event_listener_item * |
2445 | __find_event_listener_item(struct mlxsw_core *mlxsw_core, |
2446 | const struct mlxsw_event_listener *el) |
2447 | { |
2448 | struct mlxsw_event_listener_item *el_item; |
2449 | |
2450 | list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) { |
2451 | if (__is_event_listener_equal(el_a: &el_item->el, el_b: el)) |
2452 | return el_item; |
2453 | } |
2454 | return NULL; |
2455 | } |
2456 | |
2457 | int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core, |
2458 | const struct mlxsw_event_listener *el, |
2459 | void *priv) |
2460 | { |
2461 | int err; |
2462 | struct mlxsw_event_listener_item *el_item; |
2463 | const struct mlxsw_rx_listener rxl = { |
2464 | .func = mlxsw_core_event_listener_func, |
2465 | .local_port = MLXSW_PORT_DONT_CARE, |
2466 | .trap_id = el->trap_id, |
2467 | }; |
2468 | |
2469 | el_item = __find_event_listener_item(mlxsw_core, el); |
2470 | if (el_item) |
2471 | return -EEXIST; |
2472 | el_item = kmalloc(size: sizeof(*el_item), GFP_KERNEL); |
2473 | if (!el_item) |
2474 | return -ENOMEM; |
2475 | el_item->mlxsw_core = mlxsw_core; |
2476 | el_item->el = *el; |
2477 | el_item->priv = priv; |
2478 | |
2479 | err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true); |
2480 | if (err) |
2481 | goto err_rx_listener_register; |
2482 | |
2483 | /* No reason to save item if we did not manage to register an RX |
2484 | * listener for it. |
2485 | */ |
2486 | list_add_rcu(new: &el_item->list, head: &mlxsw_core->event_listener_list); |
2487 | |
2488 | return 0; |
2489 | |
2490 | err_rx_listener_register: |
2491 | kfree(objp: el_item); |
2492 | return err; |
2493 | } |
2494 | EXPORT_SYMBOL(mlxsw_core_event_listener_register); |
2495 | |
2496 | void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, |
2497 | const struct mlxsw_event_listener *el) |
2498 | { |
2499 | struct mlxsw_event_listener_item *el_item; |
2500 | const struct mlxsw_rx_listener rxl = { |
2501 | .func = mlxsw_core_event_listener_func, |
2502 | .local_port = MLXSW_PORT_DONT_CARE, |
2503 | .trap_id = el->trap_id, |
2504 | }; |
2505 | |
2506 | el_item = __find_event_listener_item(mlxsw_core, el); |
2507 | if (!el_item) |
2508 | return; |
2509 | mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl); |
2510 | list_del(entry: &el_item->list); |
2511 | kfree(objp: el_item); |
2512 | } |
2513 | EXPORT_SYMBOL(mlxsw_core_event_listener_unregister); |
2514 | |
2515 | static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core, |
2516 | const struct mlxsw_listener *listener, |
2517 | void *priv, bool enabled) |
2518 | { |
2519 | if (listener->is_event) { |
2520 | WARN_ON(!enabled); |
2521 | return mlxsw_core_event_listener_register(mlxsw_core, |
2522 | &listener->event_listener, |
2523 | priv); |
2524 | } else { |
2525 | return mlxsw_core_rx_listener_register(mlxsw_core, |
2526 | &listener->rx_listener, |
2527 | priv, enabled); |
2528 | } |
2529 | } |
2530 | |
2531 | static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core, |
2532 | const struct mlxsw_listener *listener, |
2533 | void *priv) |
2534 | { |
2535 | if (listener->is_event) |
2536 | mlxsw_core_event_listener_unregister(mlxsw_core, |
2537 | &listener->event_listener); |
2538 | else |
2539 | mlxsw_core_rx_listener_unregister(mlxsw_core, |
2540 | &listener->rx_listener); |
2541 | } |
2542 | |
2543 | int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core, |
2544 | const struct mlxsw_listener *listener, void *priv) |
2545 | { |
2546 | enum mlxsw_reg_htgt_trap_group trap_group; |
2547 | enum mlxsw_reg_hpkt_action action; |
2548 | char hpkt_pl[MLXSW_REG_HPKT_LEN]; |
2549 | int err; |
2550 | |
2551 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
2552 | return 0; |
2553 | |
2554 | err = mlxsw_core_listener_register(mlxsw_core, listener, priv, |
2555 | enabled: listener->enabled_on_register); |
2556 | if (err) |
2557 | return err; |
2558 | |
2559 | action = listener->enabled_on_register ? listener->en_action : |
2560 | listener->dis_action; |
2561 | trap_group = listener->enabled_on_register ? listener->en_trap_group : |
2562 | listener->dis_trap_group; |
2563 | mlxsw_reg_hpkt_pack(payload: hpkt_pl, action, trap_id: listener->trap_id, |
2564 | trap_group, is_ctrl: listener->is_ctrl); |
2565 | err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), payload: hpkt_pl); |
2566 | if (err) |
2567 | goto err_trap_set; |
2568 | |
2569 | return 0; |
2570 | |
2571 | err_trap_set: |
2572 | mlxsw_core_listener_unregister(mlxsw_core, listener, priv); |
2573 | return err; |
2574 | } |
2575 | EXPORT_SYMBOL(mlxsw_core_trap_register); |
2576 | |
2577 | void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core, |
2578 | const struct mlxsw_listener *listener, |
2579 | void *priv) |
2580 | { |
2581 | char hpkt_pl[MLXSW_REG_HPKT_LEN]; |
2582 | |
2583 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
2584 | return; |
2585 | |
2586 | if (!listener->is_event) { |
2587 | mlxsw_reg_hpkt_pack(payload: hpkt_pl, action: listener->dis_action, |
2588 | trap_id: listener->trap_id, trap_group: listener->dis_trap_group, |
2589 | is_ctrl: listener->is_ctrl); |
2590 | mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), payload: hpkt_pl); |
2591 | } |
2592 | |
2593 | mlxsw_core_listener_unregister(mlxsw_core, listener, priv); |
2594 | } |
2595 | EXPORT_SYMBOL(mlxsw_core_trap_unregister); |
2596 | |
2597 | int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core, |
2598 | const struct mlxsw_listener *listeners, |
2599 | size_t listeners_count, void *priv) |
2600 | { |
2601 | int i, err; |
2602 | |
2603 | for (i = 0; i < listeners_count; i++) { |
2604 | err = mlxsw_core_trap_register(mlxsw_core, |
2605 | &listeners[i], |
2606 | priv); |
2607 | if (err) |
2608 | goto err_listener_register; |
2609 | } |
2610 | return 0; |
2611 | |
2612 | err_listener_register: |
2613 | for (i--; i >= 0; i--) { |
2614 | mlxsw_core_trap_unregister(mlxsw_core, |
2615 | &listeners[i], |
2616 | priv); |
2617 | } |
2618 | return err; |
2619 | } |
2620 | EXPORT_SYMBOL(mlxsw_core_traps_register); |
2621 | |
2622 | void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core, |
2623 | const struct mlxsw_listener *listeners, |
2624 | size_t listeners_count, void *priv) |
2625 | { |
2626 | int i; |
2627 | |
2628 | for (i = 0; i < listeners_count; i++) { |
2629 | mlxsw_core_trap_unregister(mlxsw_core, |
2630 | &listeners[i], |
2631 | priv); |
2632 | } |
2633 | } |
2634 | EXPORT_SYMBOL(mlxsw_core_traps_unregister); |
2635 | |
2636 | int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core, |
2637 | const struct mlxsw_listener *listener, |
2638 | bool enabled) |
2639 | { |
2640 | enum mlxsw_reg_htgt_trap_group trap_group; |
2641 | enum mlxsw_reg_hpkt_action action; |
2642 | char hpkt_pl[MLXSW_REG_HPKT_LEN]; |
2643 | int err; |
2644 | |
2645 | /* Not supported for event listener */ |
2646 | if (WARN_ON(listener->is_event)) |
2647 | return -EINVAL; |
2648 | |
2649 | action = enabled ? listener->en_action : listener->dis_action; |
2650 | trap_group = enabled ? listener->en_trap_group : |
2651 | listener->dis_trap_group; |
2652 | mlxsw_reg_hpkt_pack(payload: hpkt_pl, action, trap_id: listener->trap_id, |
2653 | trap_group, is_ctrl: listener->is_ctrl); |
2654 | err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), payload: hpkt_pl); |
2655 | if (err) |
2656 | return err; |
2657 | |
2658 | mlxsw_core_rx_listener_state_set(mlxsw_core, rxl: &listener->rx_listener, |
2659 | enabled); |
2660 | return 0; |
2661 | } |
2662 | EXPORT_SYMBOL(mlxsw_core_trap_state_set); |
2663 | |
2664 | static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core) |
2665 | { |
2666 | return atomic64_inc_return(v: &mlxsw_core->emad.tid); |
2667 | } |
2668 | |
2669 | static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, |
2670 | const struct mlxsw_reg_info *reg, |
2671 | char *payload, |
2672 | enum mlxsw_core_reg_access_type type, |
2673 | struct list_head *bulk_list, |
2674 | mlxsw_reg_trans_cb_t *cb, |
2675 | unsigned long cb_priv) |
2676 | { |
2677 | u64 tid = mlxsw_core_tid_get(mlxsw_core); |
2678 | struct mlxsw_reg_trans *trans; |
2679 | int err; |
2680 | |
2681 | trans = kzalloc(size: sizeof(*trans), GFP_KERNEL); |
2682 | if (!trans) |
2683 | return -ENOMEM; |
2684 | |
2685 | err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, |
2686 | bulk_list, cb, cb_priv, tid); |
2687 | if (err) { |
2688 | kfree_rcu(trans, rcu); |
2689 | return err; |
2690 | } |
2691 | return 0; |
2692 | } |
2693 | |
2694 | int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core, |
2695 | const struct mlxsw_reg_info *reg, char *payload, |
2696 | struct list_head *bulk_list, |
2697 | mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) |
2698 | { |
2699 | return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, |
2700 | type: MLXSW_CORE_REG_ACCESS_TYPE_QUERY, |
2701 | bulk_list, cb, cb_priv); |
2702 | } |
2703 | EXPORT_SYMBOL(mlxsw_reg_trans_query); |
2704 | |
2705 | int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core, |
2706 | const struct mlxsw_reg_info *reg, char *payload, |
2707 | struct list_head *bulk_list, |
2708 | mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) |
2709 | { |
2710 | return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, |
2711 | type: MLXSW_CORE_REG_ACCESS_TYPE_WRITE, |
2712 | bulk_list, cb, cb_priv); |
2713 | } |
2714 | EXPORT_SYMBOL(mlxsw_reg_trans_write); |
2715 | |
2716 | #define MLXSW_REG_TRANS_ERR_STRING_SIZE 256 |
2717 | |
2718 | static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans) |
2719 | { |
2720 | char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE]; |
2721 | struct mlxsw_core *mlxsw_core = trans->core; |
2722 | int err; |
2723 | |
2724 | wait_for_completion(&trans->completion); |
2725 | cancel_delayed_work_sync(dwork: &trans->timeout_dw); |
2726 | err = trans->err; |
2727 | |
2728 | if (trans->retries) |
2729 | dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n" , |
2730 | trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid); |
2731 | if (err) { |
2732 | dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n" , |
2733 | trans->tid, trans->reg->id, |
2734 | mlxsw_reg_id_str(trans->reg->id), |
2735 | mlxsw_core_reg_access_type_str(trans->type), |
2736 | trans->emad_status, |
2737 | mlxsw_emad_op_tlv_status_str(trans->emad_status)); |
2738 | |
2739 | snprintf(buf: err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE, |
2740 | fmt: "(tid=%llx,reg_id=%x(%s)) %s (%s)\n" , trans->tid, |
2741 | trans->reg->id, mlxsw_reg_id_str(reg_id: trans->reg->id), |
2742 | mlxsw_emad_op_tlv_status_str(status: trans->emad_status), |
2743 | trans->emad_err_string ? trans->emad_err_string : "" ); |
2744 | |
2745 | trace_devlink_hwerr(devlink: priv_to_devlink(priv: mlxsw_core), |
2746 | err: trans->emad_status, msg: err_string); |
2747 | |
2748 | kfree(objp: trans->emad_err_string); |
2749 | } |
2750 | |
2751 | list_del(entry: &trans->bulk_list); |
2752 | kfree_rcu(trans, rcu); |
2753 | return err; |
2754 | } |
2755 | |
2756 | int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list) |
2757 | { |
2758 | struct mlxsw_reg_trans *trans; |
2759 | struct mlxsw_reg_trans *tmp; |
2760 | int sum_err = 0; |
2761 | int err; |
2762 | |
2763 | list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) { |
2764 | err = mlxsw_reg_trans_wait(trans); |
2765 | if (err && sum_err == 0) |
2766 | sum_err = err; /* first error to be returned */ |
2767 | } |
2768 | return sum_err; |
2769 | } |
2770 | EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait); |
2771 | |
2772 | struct mlxsw_core_irq_event_handler_item { |
2773 | struct list_head list; |
2774 | void (*cb)(struct mlxsw_core *mlxsw_core); |
2775 | }; |
2776 | |
2777 | int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core, |
2778 | mlxsw_irq_event_cb_t cb) |
2779 | { |
2780 | struct mlxsw_core_irq_event_handler_item *item; |
2781 | |
2782 | item = kzalloc(size: sizeof(*item), GFP_KERNEL); |
2783 | if (!item) |
2784 | return -ENOMEM; |
2785 | item->cb = cb; |
2786 | mutex_lock(&mlxsw_core->irq_event_handler_lock); |
2787 | list_add_tail(new: &item->list, head: &mlxsw_core->irq_event_handler_list); |
2788 | mutex_unlock(lock: &mlxsw_core->irq_event_handler_lock); |
2789 | return 0; |
2790 | } |
2791 | EXPORT_SYMBOL(mlxsw_core_irq_event_handler_register); |
2792 | |
2793 | void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core, |
2794 | mlxsw_irq_event_cb_t cb) |
2795 | { |
2796 | struct mlxsw_core_irq_event_handler_item *item, *tmp; |
2797 | |
2798 | mutex_lock(&mlxsw_core->irq_event_handler_lock); |
2799 | list_for_each_entry_safe(item, tmp, |
2800 | &mlxsw_core->irq_event_handler_list, list) { |
2801 | if (item->cb == cb) { |
2802 | list_del(entry: &item->list); |
2803 | kfree(objp: item); |
2804 | } |
2805 | } |
2806 | mutex_unlock(lock: &mlxsw_core->irq_event_handler_lock); |
2807 | } |
2808 | EXPORT_SYMBOL(mlxsw_core_irq_event_handler_unregister); |
2809 | |
2810 | void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core) |
2811 | { |
2812 | struct mlxsw_core_irq_event_handler_item *item; |
2813 | |
2814 | mutex_lock(&mlxsw_core->irq_event_handler_lock); |
2815 | list_for_each_entry(item, &mlxsw_core->irq_event_handler_list, list) { |
2816 | if (item->cb) |
2817 | item->cb(mlxsw_core); |
2818 | } |
2819 | mutex_unlock(lock: &mlxsw_core->irq_event_handler_lock); |
2820 | } |
2821 | EXPORT_SYMBOL(mlxsw_core_irq_event_handlers_call); |
2822 | |
2823 | static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, |
2824 | const struct mlxsw_reg_info *reg, |
2825 | char *payload, |
2826 | enum mlxsw_core_reg_access_type type) |
2827 | { |
2828 | enum mlxsw_emad_op_tlv_status status; |
2829 | int err, n_retry; |
2830 | bool reset_ok; |
2831 | char *in_mbox, *out_mbox, *tmp; |
2832 | |
2833 | dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n" , |
2834 | reg->id, mlxsw_reg_id_str(reg->id), |
2835 | mlxsw_core_reg_access_type_str(type)); |
2836 | |
2837 | in_mbox = mlxsw_cmd_mbox_alloc(); |
2838 | if (!in_mbox) |
2839 | return -ENOMEM; |
2840 | |
2841 | out_mbox = mlxsw_cmd_mbox_alloc(); |
2842 | if (!out_mbox) { |
2843 | err = -ENOMEM; |
2844 | goto free_in_mbox; |
2845 | } |
2846 | |
2847 | mlxsw_emad_pack_op_tlv(op_tlv: in_mbox, reg, type, |
2848 | tid: mlxsw_core_tid_get(mlxsw_core)); |
2849 | tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); |
2850 | mlxsw_emad_pack_reg_tlv(reg_tlv: tmp, reg, payload); |
2851 | |
2852 | /* There is a special treatment needed for MRSR (reset) register. |
2853 | * The command interface will return error after the command |
2854 | * is executed, so tell the lower layer to expect it |
2855 | * and cope accordingly. |
2856 | */ |
2857 | reset_ok = reg->id == MLXSW_REG_MRSR_ID; |
2858 | |
2859 | n_retry = 0; |
2860 | retry: |
2861 | err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox); |
2862 | if (!err) { |
2863 | err = mlxsw_emad_process_status(op_tlv: out_mbox, p_status: &status); |
2864 | if (err) { |
2865 | if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY) |
2866 | goto retry; |
2867 | dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n" , |
2868 | status, mlxsw_emad_op_tlv_status_str(status)); |
2869 | } |
2870 | } |
2871 | |
2872 | if (!err) |
2873 | memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox), |
2874 | reg->len); |
2875 | |
2876 | mlxsw_cmd_mbox_free(mbox: out_mbox); |
2877 | free_in_mbox: |
2878 | mlxsw_cmd_mbox_free(mbox: in_mbox); |
2879 | if (err) |
2880 | dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n" , |
2881 | reg->id, mlxsw_reg_id_str(reg->id), |
2882 | mlxsw_core_reg_access_type_str(type)); |
2883 | return err; |
2884 | } |
2885 | |
2886 | static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core, |
2887 | char *payload, size_t payload_len, |
2888 | unsigned long cb_priv) |
2889 | { |
2890 | char *orig_payload = (char *) cb_priv; |
2891 | |
2892 | memcpy(orig_payload, payload, payload_len); |
2893 | } |
2894 | |
2895 | static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core, |
2896 | const struct mlxsw_reg_info *reg, |
2897 | char *payload, |
2898 | enum mlxsw_core_reg_access_type type) |
2899 | { |
2900 | LIST_HEAD(bulk_list); |
2901 | int err; |
2902 | |
2903 | /* During initialization EMAD interface is not available to us, |
2904 | * so we default to command interface. We switch to EMAD interface |
2905 | * after setting the appropriate traps. |
2906 | */ |
2907 | if (!mlxsw_core->emad.use_emad) |
2908 | return mlxsw_core_reg_access_cmd(mlxsw_core, reg, |
2909 | payload, type); |
2910 | |
2911 | err = mlxsw_core_reg_access_emad(mlxsw_core, reg, |
2912 | payload, type, bulk_list: &bulk_list, |
2913 | cb: mlxsw_core_reg_access_cb, |
2914 | cb_priv: (unsigned long) payload); |
2915 | if (err) |
2916 | return err; |
2917 | return mlxsw_reg_trans_bulk_wait(&bulk_list); |
2918 | } |
2919 | |
2920 | int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, |
2921 | const struct mlxsw_reg_info *reg, char *payload) |
2922 | { |
2923 | return mlxsw_core_reg_access(mlxsw_core, reg, payload, |
2924 | type: MLXSW_CORE_REG_ACCESS_TYPE_QUERY); |
2925 | } |
2926 | EXPORT_SYMBOL(mlxsw_reg_query); |
2927 | |
2928 | int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, |
2929 | const struct mlxsw_reg_info *reg, char *payload) |
2930 | { |
2931 | return mlxsw_core_reg_access(mlxsw_core, reg, payload, |
2932 | type: MLXSW_CORE_REG_ACCESS_TYPE_WRITE); |
2933 | } |
2934 | EXPORT_SYMBOL(mlxsw_reg_write); |
2935 | |
2936 | void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, |
2937 | struct mlxsw_rx_info *rx_info) |
2938 | { |
2939 | struct mlxsw_rx_listener_item *rxl_item; |
2940 | const struct mlxsw_rx_listener *rxl; |
2941 | u16 local_port; |
2942 | bool found = false; |
2943 | |
2944 | if (rx_info->is_lag) { |
2945 | dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n" , |
2946 | __func__, rx_info->u.lag_id, |
2947 | rx_info->trap_id); |
2948 | /* Upper layer does not care if the skb came from LAG or not, |
2949 | * so just get the local_port for the lag port and push it up. |
2950 | */ |
2951 | local_port = mlxsw_core_lag_mapping_get(mlxsw_core, |
2952 | lag_id: rx_info->u.lag_id, |
2953 | port_index: rx_info->lag_port_index); |
2954 | } else { |
2955 | local_port = rx_info->u.sys_port; |
2956 | } |
2957 | |
2958 | dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n" , |
2959 | __func__, local_port, rx_info->trap_id); |
2960 | |
2961 | if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || |
2962 | (local_port >= mlxsw_core->max_ports)) |
2963 | goto drop; |
2964 | |
2965 | rcu_read_lock(); |
2966 | list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) { |
2967 | rxl = &rxl_item->rxl; |
2968 | if ((rxl->local_port == MLXSW_PORT_DONT_CARE || |
2969 | rxl->local_port == local_port) && |
2970 | rxl->trap_id == rx_info->trap_id && |
2971 | rxl->mirror_reason == rx_info->mirror_reason) { |
2972 | if (rxl_item->enabled) |
2973 | found = true; |
2974 | break; |
2975 | } |
2976 | } |
2977 | if (!found) { |
2978 | rcu_read_unlock(); |
2979 | goto drop; |
2980 | } |
2981 | |
2982 | rxl->func(skb, local_port, rxl_item->priv); |
2983 | rcu_read_unlock(); |
2984 | return; |
2985 | |
2986 | drop: |
2987 | dev_kfree_skb(skb); |
2988 | } |
2989 | EXPORT_SYMBOL(mlxsw_core_skb_receive); |
2990 | |
2991 | static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core, |
2992 | u16 lag_id, u8 port_index) |
2993 | { |
2994 | return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id + |
2995 | port_index; |
2996 | } |
2997 | |
2998 | void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, |
2999 | u16 lag_id, u8 port_index, u16 local_port) |
3000 | { |
3001 | int index = mlxsw_core_lag_mapping_index(mlxsw_core, |
3002 | lag_id, port_index); |
3003 | |
3004 | mlxsw_core->lag.mapping[index] = local_port; |
3005 | } |
3006 | EXPORT_SYMBOL(mlxsw_core_lag_mapping_set); |
3007 | |
3008 | u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, |
3009 | u16 lag_id, u8 port_index) |
3010 | { |
3011 | int index = mlxsw_core_lag_mapping_index(mlxsw_core, |
3012 | lag_id, port_index); |
3013 | |
3014 | return mlxsw_core->lag.mapping[index]; |
3015 | } |
3016 | EXPORT_SYMBOL(mlxsw_core_lag_mapping_get); |
3017 | |
3018 | void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, |
3019 | u16 lag_id, u16 local_port) |
3020 | { |
3021 | int i; |
3022 | |
3023 | for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) { |
3024 | int index = mlxsw_core_lag_mapping_index(mlxsw_core, |
3025 | lag_id, port_index: i); |
3026 | |
3027 | if (mlxsw_core->lag.mapping[index] == local_port) |
3028 | mlxsw_core->lag.mapping[index] = 0; |
3029 | } |
3030 | } |
3031 | EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); |
3032 | |
3033 | bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, |
3034 | enum mlxsw_res_id res_id) |
3035 | { |
3036 | return mlxsw_res_valid(res: &mlxsw_core->res, res_id); |
3037 | } |
3038 | EXPORT_SYMBOL(mlxsw_core_res_valid); |
3039 | |
3040 | u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, |
3041 | enum mlxsw_res_id res_id) |
3042 | { |
3043 | return mlxsw_res_get(res: &mlxsw_core->res, res_id); |
3044 | } |
3045 | EXPORT_SYMBOL(mlxsw_core_res_get); |
3046 | |
3047 | static const struct devlink_port_ops mlxsw_devlink_port_ops = { |
3048 | .port_split = mlxsw_devlink_port_split, |
3049 | .port_unsplit = mlxsw_devlink_port_unsplit, |
3050 | }; |
3051 | |
3052 | static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, |
3053 | enum devlink_port_flavour flavour, |
3054 | u8 slot_index, u32 port_number, bool split, |
3055 | u32 split_port_subnumber, |
3056 | bool splittable, u32 lanes, |
3057 | const unsigned char *switch_id, |
3058 | unsigned char switch_id_len) |
3059 | { |
3060 | struct devlink *devlink = priv_to_devlink(priv: mlxsw_core); |
3061 | struct mlxsw_core_port *mlxsw_core_port = |
3062 | &mlxsw_core->ports[local_port]; |
3063 | struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; |
3064 | struct devlink_port_attrs attrs = {}; |
3065 | int err; |
3066 | |
3067 | attrs.split = split; |
3068 | attrs.lanes = lanes; |
3069 | attrs.splittable = splittable; |
3070 | attrs.flavour = flavour; |
3071 | attrs.phys.port_number = port_number; |
3072 | attrs.phys.split_subport_number = split_port_subnumber; |
3073 | memcpy(attrs.switch_id.id, switch_id, switch_id_len); |
3074 | attrs.switch_id.id_len = switch_id_len; |
3075 | mlxsw_core_port->local_port = local_port; |
3076 | devlink_port_attrs_set(devlink_port, devlink_port_attrs: &attrs); |
3077 | if (slot_index) { |
3078 | struct mlxsw_linecard *linecard; |
3079 | |
3080 | linecard = mlxsw_linecard_get(linecards: mlxsw_core->linecards, |
3081 | slot_index); |
3082 | mlxsw_core_port->linecard = linecard; |
3083 | devlink_port_linecard_set(devlink_port, |
3084 | linecard: linecard->devlink_linecard); |
3085 | } |
3086 | err = devl_port_register_with_ops(devlink, devlink_port, port_index: local_port, |
3087 | ops: &mlxsw_devlink_port_ops); |
3088 | if (err) |
3089 | memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); |
3090 | return err; |
3091 | } |
3092 | |
3093 | static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) |
3094 | { |
3095 | struct mlxsw_core_port *mlxsw_core_port = |
3096 | &mlxsw_core->ports[local_port]; |
3097 | struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; |
3098 | |
3099 | devl_port_unregister(devlink_port); |
3100 | memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); |
3101 | } |
3102 | |
3103 | int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, |
3104 | u8 slot_index, u32 port_number, bool split, |
3105 | u32 split_port_subnumber, |
3106 | bool splittable, u32 lanes, |
3107 | const unsigned char *switch_id, |
3108 | unsigned char switch_id_len) |
3109 | { |
3110 | int err; |
3111 | |
3112 | err = __mlxsw_core_port_init(mlxsw_core, local_port, |
3113 | flavour: DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index, |
3114 | port_number, split, split_port_subnumber, |
3115 | splittable, lanes, |
3116 | switch_id, switch_id_len); |
3117 | if (err) |
3118 | return err; |
3119 | |
3120 | atomic_inc(v: &mlxsw_core->active_ports_count); |
3121 | return 0; |
3122 | } |
3123 | EXPORT_SYMBOL(mlxsw_core_port_init); |
3124 | |
3125 | void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) |
3126 | { |
3127 | atomic_dec(v: &mlxsw_core->active_ports_count); |
3128 | |
3129 | __mlxsw_core_port_fini(mlxsw_core, local_port); |
3130 | } |
3131 | EXPORT_SYMBOL(mlxsw_core_port_fini); |
3132 | |
3133 | int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, |
3134 | void *port_driver_priv, |
3135 | const unsigned char *switch_id, |
3136 | unsigned char switch_id_len) |
3137 | { |
3138 | struct mlxsw_core_port *mlxsw_core_port = |
3139 | &mlxsw_core->ports[MLXSW_PORT_CPU_PORT]; |
3140 | int err; |
3141 | |
3142 | err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT, |
3143 | flavour: DEVLINK_PORT_FLAVOUR_CPU, |
3144 | slot_index: 0, port_number: 0, split: false, split_port_subnumber: 0, splittable: false, lanes: 0, |
3145 | switch_id, switch_id_len); |
3146 | if (err) |
3147 | return err; |
3148 | |
3149 | mlxsw_core_port->port_driver_priv = port_driver_priv; |
3150 | return 0; |
3151 | } |
3152 | EXPORT_SYMBOL(mlxsw_core_cpu_port_init); |
3153 | |
3154 | void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core) |
3155 | { |
3156 | __mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT); |
3157 | } |
3158 | EXPORT_SYMBOL(mlxsw_core_cpu_port_fini); |
3159 | |
3160 | void mlxsw_core_port_netdev_link(struct mlxsw_core *mlxsw_core, u16 local_port, |
3161 | void *port_driver_priv, struct net_device *dev) |
3162 | { |
3163 | struct mlxsw_core_port *mlxsw_core_port = |
3164 | &mlxsw_core->ports[local_port]; |
3165 | struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; |
3166 | |
3167 | mlxsw_core_port->port_driver_priv = port_driver_priv; |
3168 | SET_NETDEV_DEVLINK_PORT(dev, devlink_port); |
3169 | } |
3170 | EXPORT_SYMBOL(mlxsw_core_port_netdev_link); |
3171 | |
3172 | struct devlink_port * |
3173 | mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, |
3174 | u16 local_port) |
3175 | { |
3176 | struct mlxsw_core_port *mlxsw_core_port = |
3177 | &mlxsw_core->ports[local_port]; |
3178 | struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; |
3179 | |
3180 | return devlink_port; |
3181 | } |
3182 | EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get); |
3183 | |
3184 | struct mlxsw_linecard * |
3185 | mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core, |
3186 | u16 local_port) |
3187 | { |
3188 | struct mlxsw_core_port *mlxsw_core_port = |
3189 | &mlxsw_core->ports[local_port]; |
3190 | |
3191 | return mlxsw_core_port->linecard; |
3192 | } |
3193 | |
3194 | void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core, |
3195 | bool (*selector)(void *priv, u16 local_port), |
3196 | void *priv) |
3197 | { |
3198 | if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected)) |
3199 | return; |
3200 | mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv); |
3201 | } |
3202 | |
3203 | struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core) |
3204 | { |
3205 | return mlxsw_core->env; |
3206 | } |
3207 | |
3208 | static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, |
3209 | const char *buf, size_t size) |
3210 | { |
3211 | __be32 *m = (__be32 *) buf; |
3212 | int i; |
3213 | int count = size / sizeof(__be32); |
3214 | |
3215 | for (i = count - 1; i >= 0; i--) |
3216 | if (m[i]) |
3217 | break; |
3218 | i++; |
3219 | count = i ? i : 1; |
3220 | for (i = 0; i < count; i += 4) |
3221 | dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n" , |
3222 | i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), |
3223 | be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); |
3224 | } |
3225 | |
3226 | int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, |
3227 | u32 in_mod, bool out_mbox_direct, bool reset_ok, |
3228 | char *in_mbox, size_t in_mbox_size, |
3229 | char *out_mbox, size_t out_mbox_size) |
3230 | { |
3231 | u8 status; |
3232 | int err; |
3233 | |
3234 | BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32)); |
3235 | if (!mlxsw_core->bus->cmd_exec) |
3236 | return -EOPNOTSUPP; |
3237 | |
3238 | dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n" , |
3239 | opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod); |
3240 | if (in_mbox) { |
3241 | dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n" ); |
3242 | mlxsw_core_buf_dump_dbg(mlxsw_core, buf: in_mbox, size: in_mbox_size); |
3243 | } |
3244 | |
3245 | err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode, |
3246 | opcode_mod, in_mod, out_mbox_direct, |
3247 | in_mbox, in_mbox_size, |
3248 | out_mbox, out_mbox_size, &status); |
3249 | |
3250 | if (!err && out_mbox) { |
3251 | dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n" ); |
3252 | mlxsw_core_buf_dump_dbg(mlxsw_core, buf: out_mbox, size: out_mbox_size); |
3253 | } |
3254 | |
3255 | if (reset_ok && err == -EIO && |
3256 | status == MLXSW_CMD_STATUS_RUNNING_RESET) { |
3257 | err = 0; |
3258 | } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { |
3259 | dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n" , |
3260 | opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, |
3261 | in_mod, status, mlxsw_cmd_status_str(status)); |
3262 | } else if (err == -ETIMEDOUT) { |
3263 | dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n" , |
3264 | opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, |
3265 | in_mod); |
3266 | } |
3267 | |
3268 | return err; |
3269 | } |
3270 | EXPORT_SYMBOL(mlxsw_cmd_exec); |
3271 | |
3272 | int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) |
3273 | { |
3274 | return queue_delayed_work(wq: mlxsw_wq, dwork, delay); |
3275 | } |
3276 | EXPORT_SYMBOL(mlxsw_core_schedule_dw); |
3277 | |
3278 | bool mlxsw_core_schedule_work(struct work_struct *work) |
3279 | { |
3280 | return queue_work(wq: mlxsw_owq, work); |
3281 | } |
3282 | EXPORT_SYMBOL(mlxsw_core_schedule_work); |
3283 | |
3284 | void mlxsw_core_flush_owq(void) |
3285 | { |
3286 | flush_workqueue(mlxsw_owq); |
3287 | } |
3288 | EXPORT_SYMBOL(mlxsw_core_flush_owq); |
3289 | |
3290 | int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, |
3291 | const struct mlxsw_config_profile *profile, |
3292 | u64 *p_single_size, u64 *p_double_size, |
3293 | u64 *p_linear_size) |
3294 | { |
3295 | struct mlxsw_driver *driver = mlxsw_core->driver; |
3296 | |
3297 | if (!driver->kvd_sizes_get) |
3298 | return -EINVAL; |
3299 | |
3300 | return driver->kvd_sizes_get(mlxsw_core, profile, |
3301 | p_single_size, p_double_size, |
3302 | p_linear_size); |
3303 | } |
3304 | EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get); |
3305 | |
3306 | int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox, |
3307 | struct mlxsw_res *res) |
3308 | { |
3309 | int index, i; |
3310 | u64 data; |
3311 | u16 id; |
3312 | int err; |
3313 | |
3314 | mlxsw_cmd_mbox_zero(mbox); |
3315 | |
3316 | for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; |
3317 | index++) { |
3318 | err = mlxsw_cmd_query_resources(mlxsw_core, out_mbox: mbox, index); |
3319 | if (err) |
3320 | return err; |
3321 | |
3322 | for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) { |
3323 | id = mlxsw_cmd_mbox_query_resource_id_get(buf: mbox, index: i); |
3324 | data = mlxsw_cmd_mbox_query_resource_data_get(buf: mbox, index: i); |
3325 | |
3326 | if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID) |
3327 | return 0; |
3328 | |
3329 | mlxsw_res_parse(res, id, value: data); |
3330 | } |
3331 | } |
3332 | |
3333 | /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get |
3334 | * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW. |
3335 | */ |
3336 | return -EIO; |
3337 | } |
3338 | EXPORT_SYMBOL(mlxsw_core_resources_query); |
3339 | |
3340 | u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core) |
3341 | { |
3342 | return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv); |
3343 | } |
3344 | EXPORT_SYMBOL(mlxsw_core_read_frc_h); |
3345 | |
3346 | u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core) |
3347 | { |
3348 | return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv); |
3349 | } |
3350 | EXPORT_SYMBOL(mlxsw_core_read_frc_l); |
3351 | |
3352 | u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core) |
3353 | { |
3354 | return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv); |
3355 | } |
3356 | EXPORT_SYMBOL(mlxsw_core_read_utc_sec); |
3357 | |
3358 | u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core) |
3359 | { |
3360 | return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv); |
3361 | } |
3362 | EXPORT_SYMBOL(mlxsw_core_read_utc_nsec); |
3363 | |
3364 | bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core) |
3365 | { |
3366 | return mlxsw_core->driver->sdq_supports_cqe_v2; |
3367 | } |
3368 | EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2); |
3369 | |
3370 | static int __init mlxsw_core_module_init(void) |
3371 | { |
3372 | int err; |
3373 | |
3374 | err = mlxsw_linecard_driver_register(); |
3375 | if (err) |
3376 | return err; |
3377 | |
3378 | mlxsw_wq = alloc_workqueue(fmt: mlxsw_core_driver_name, flags: 0, max_active: 0); |
3379 | if (!mlxsw_wq) { |
3380 | err = -ENOMEM; |
3381 | goto err_alloc_workqueue; |
3382 | } |
3383 | mlxsw_owq = alloc_ordered_workqueue("%s_ordered" , 0, |
3384 | mlxsw_core_driver_name); |
3385 | if (!mlxsw_owq) { |
3386 | err = -ENOMEM; |
3387 | goto err_alloc_ordered_workqueue; |
3388 | } |
3389 | return 0; |
3390 | |
3391 | err_alloc_ordered_workqueue: |
3392 | destroy_workqueue(wq: mlxsw_wq); |
3393 | err_alloc_workqueue: |
3394 | mlxsw_linecard_driver_unregister(); |
3395 | return err; |
3396 | } |
3397 | |
3398 | static void __exit mlxsw_core_module_exit(void) |
3399 | { |
3400 | destroy_workqueue(wq: mlxsw_owq); |
3401 | destroy_workqueue(wq: mlxsw_wq); |
3402 | mlxsw_linecard_driver_unregister(); |
3403 | } |
3404 | |
3405 | module_init(mlxsw_core_module_init); |
3406 | module_exit(mlxsw_core_module_exit); |
3407 | |
3408 | MODULE_LICENSE("Dual BSD/GPL" ); |
3409 | MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>" ); |
3410 | MODULE_DESCRIPTION("Mellanox switch device core driver" ); |
3411 | |