// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/dpll.h>
#include <linux/mlx5/driver.h>

/* This structure represents a reference to DPLL, one is created
 * per mdev instance.
 */
struct mlx5_dpll {
	struct dpll_device *dpll;
	struct dpll_pin *dpll_pin;
	struct mlx5_core_dev *mdev;
	struct workqueue_struct *wq;
	struct delayed_work work;
	struct {
		bool valid;
		enum dpll_lock_status lock_status;
		enum dpll_pin_state pin_state;
	} last;
	struct notifier_block mdev_nb;
	struct net_device *tracking_netdev;
};

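/* Read the MSECQ register to obtain the local clock identity, which is
 * used below as the DPLL subsystem clock ID for both the device and the pin.
 */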
static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id)
{
	u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {};
	int err;

	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MSECQ, 0, 0);
	if (err)
		return err;
	*clock_id = MLX5_GET64(msecq_reg, out, local_clock_identity);
	return 0;
}

struct mlx5_dpll_synce_status {
	enum mlx5_msees_admin_status admin_status;
	enum mlx5_msees_oper_status oper_status;
	bool ho_acq;
	bool oper_freq_measure;
	enum mlx5_msees_failure_reason failure_reason;
	s32 frequency_diff;
};

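/* Query the MSEES register and unpack the SyncE-related fields into
 * struct mlx5_dpll_synce_status for the callers below.
 */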
static int
mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
			   struct mlx5_dpll_synce_status *synce_status)
{
	u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
	int err;

	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MSEES, 0, 0);
	if (err)
		return err;
	synce_status->admin_status = MLX5_GET(msees_reg, out, admin_status);
	synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
	synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
	synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
	synce_status->failure_reason = MLX5_GET(msees_reg, out, failure_reason);
	synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
	return 0;
}

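/* Write the MSEES register to set the admin status (TRACK or FREE_RUNNING).
 * Frequency measurement is kept enabled so that the frequency difference
 * stays available for the ffo_get() callback.
 */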
static int
mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev,
			   enum mlx5_msees_admin_status admin_status)
{
	u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};

	MLX5_SET(msees_reg, in, field_select,
		 MLX5_MSEES_FIELD_SELECT_ENABLE |
		 MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE |
		 MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
	MLX5_SET(msees_reg, in, admin_status, admin_status);
	MLX5_SET(msees_reg, in, admin_freq_measure, true);
	return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				    MLX5_REG_MSEES, 0, 1);
}

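/* Map the MSEES operational status onto the DPLL subsystem lock status:
 * the tracking states map to locked (with holdover acquired when ho_acq
 * is set), the holdover states map to holdover, anything else is unlocked.
 */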
static enum dpll_lock_status
mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
{
	switch (synce_status->oper_status) {
	case MLX5_MSEES_OPER_STATUS_SELF_TRACK:
		fallthrough;
	case MLX5_MSEES_OPER_STATUS_OTHER_TRACK:
		return synce_status->ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
					      DPLL_LOCK_STATUS_LOCKED;
	case MLX5_MSEES_OPER_STATUS_HOLDOVER:
		fallthrough;
	case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
		return DPLL_LOCK_STATUS_HOLDOVER;
	default:
		return DPLL_LOCK_STATUS_UNLOCKED;
	}
}

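/* For the failure states, translate the MSEES failure reason into a DPLL
 * lock status error; all other operational states report no error.
 */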
static enum dpll_lock_status_error
mlx5_dpll_lock_status_error_get(struct mlx5_dpll_synce_status *synce_status)
{
	switch (synce_status->oper_status) {
	case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
		fallthrough;
	case MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING:
		switch (synce_status->failure_reason) {
		case MLX5_MSEES_FAILURE_REASON_PORT_DOWN:
			return DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN;
		case MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF:
			return DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH;
		default:
			return DPLL_LOCK_STATUS_ERROR_UNDEFINED;
		}
	default:
		return DPLL_LOCK_STATUS_ERROR_NONE;
	}
}

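/* The pin is reported as connected only when tracking is both requested
 * (admin status TRACK) and achieved (operational status SELF_TRACK or
 * OTHER_TRACK); otherwise it is disconnected.
 */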
static enum dpll_pin_state
mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
{
	return (synce_status->admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
		(synce_status->oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
		 synce_status->oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
	       DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED;
}

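/* Report the fractional frequency offset from the measured frequency
 * difference, or -ENODATA if the frequency measurement is not operational.
 */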
static int
mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
		      s64 *ffo)
{
	if (!synce_status->oper_freq_measure)
		return -ENODATA;
	*ffo = synce_status->frequency_diff;
	return 0;
}

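/* DPLL device callbacks: the lock status is derived from a fresh MSEES
 * query; the device mode is fixed and only manual mode is reported.
 */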
static int
mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, void *priv,
				 enum dpll_lock_status *status,
				 enum dpll_lock_status_error *status_error,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_dpll_synce_status synce_status;
	struct mlx5_dpll *mdpll = priv;
	int err;

	err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
	if (err)
		return err;
	*status = mlx5_dpll_lock_status_get(&synce_status);
	*status_error = mlx5_dpll_lock_status_error_get(&synce_status);
	return 0;
}

static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
				     void *priv, enum dpll_mode *mode,
				     struct netlink_ext_ack *extack)
{
	*mode = DPLL_MODE_MANUAL;
	return 0;
}

static const struct dpll_device_ops mlx5_dpll_device_ops = {
	.lock_status_get = mlx5_dpll_device_lock_status_get,
	.mode_get = mlx5_dpll_device_mode_get,
};

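/* DPLL pin callbacks: the single SyncE port pin is input-only, its state
 * reflects the MSEES admin/oper status, and setting the state switches the
 * hardware between TRACK and FREE_RUNNING.
 */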
static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
				       void *pin_priv,
				       const struct dpll_device *dpll,
				       void *dpll_priv,
				       enum dpll_pin_direction *direction,
				       struct netlink_ext_ack *extack)
{
	*direction = DPLL_PIN_DIRECTION_INPUT;
	return 0;
}

static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin,
				       void *pin_priv,
				       const struct dpll_device *dpll,
				       void *dpll_priv,
				       enum dpll_pin_state *state,
				       struct netlink_ext_ack *extack)
{
	struct mlx5_dpll_synce_status synce_status;
	struct mlx5_dpll *mdpll = pin_priv;
	int err;

	err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
	if (err)
		return err;
	*state = mlx5_dpll_pin_state_get(&synce_status);
	return 0;
}

static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin,
				       void *pin_priv,
				       const struct dpll_device *dpll,
				       void *dpll_priv,
				       enum dpll_pin_state state,
				       struct netlink_ext_ack *extack)
{
	struct mlx5_dpll *mdpll = pin_priv;

	return mlx5_dpll_synce_status_set(mdpll->mdev,
					  state == DPLL_PIN_STATE_CONNECTED ?
					  MLX5_MSEES_ADMIN_STATUS_TRACK :
					  MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
}

static int mlx5_dpll_ffo_get(const struct dpll_pin *pin, void *pin_priv,
			     const struct dpll_device *dpll, void *dpll_priv,
			     s64 *ffo, struct netlink_ext_ack *extack)
{
	struct mlx5_dpll_synce_status synce_status;
	struct mlx5_dpll *mdpll = pin_priv;
	int err;

	err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
	if (err)
		return err;
	return mlx5_dpll_pin_ffo_get(&synce_status, ffo);
}

static const struct dpll_pin_ops mlx5_dpll_pins_ops = {
	.direction_get = mlx5_dpll_pin_direction_get,
	.state_on_dpll_get = mlx5_dpll_state_on_dpll_get,
	.state_on_dpll_set = mlx5_dpll_state_on_dpll_set,
	.ffo_get = mlx5_dpll_ffo_get,
};

static const struct dpll_pin_properties mlx5_dpll_pin_properties = {
	.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT,
	.capabilities = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE,
};

#define MLX5_DPLL_PERIODIC_WORK_INTERVAL 500 /* ms */

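/* The lock status and pin state are polled every
 * MLX5_DPLL_PERIODIC_WORK_INTERVAL milliseconds; a netlink change
 * notification is sent only when a value differs from the last polled one.
 */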
static void mlx5_dpll_periodic_work_queue(struct mlx5_dpll *mdpll)
{
	queue_delayed_work(mdpll->wq, &mdpll->work,
			   msecs_to_jiffies(MLX5_DPLL_PERIODIC_WORK_INTERVAL));
}

static void mlx5_dpll_periodic_work(struct work_struct *work)
{
	struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
					       work.work);
	struct mlx5_dpll_synce_status synce_status;
	enum dpll_lock_status lock_status;
	enum dpll_pin_state pin_state;
	int err;

	err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
	if (err)
		goto err_out;
	lock_status = mlx5_dpll_lock_status_get(&synce_status);
	pin_state = mlx5_dpll_pin_state_get(&synce_status);

	if (!mdpll->last.valid)
		goto invalid_out;

	if (mdpll->last.lock_status != lock_status)
		dpll_device_change_ntf(mdpll->dpll);
	if (mdpll->last.pin_state != pin_state)
		dpll_pin_change_ntf(mdpll->dpll_pin);

invalid_out:
	mdpll->last.lock_status = lock_status;
	mdpll->last.pin_state = pin_state;
	mdpll->last.valid = true;
err_out:
	mlx5_dpll_periodic_work_queue(mdpll);
}

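/* Attach the DPLL pin to the uplink netdev reported by the mlx5 core, so
 * the pin can be associated with the corresponding network interface.
 * Only one netdev is tracked at a time.
 */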
static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
					  struct net_device *netdev)
{
	if (mdpll->tracking_netdev)
		return;
	dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
	mdpll->tracking_netdev = netdev;
}

static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
{
	if (!mdpll->tracking_netdev)
		return;
	dpll_netdev_pin_clear(mdpll->tracking_netdev);
	mdpll->tracking_netdev = NULL;
}

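/* Blocking notifier called by the mlx5 core on uplink netdev changes:
 * a non-NULL netdev attaches the pin, a NULL netdev detaches it.
 */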
static int mlx5_dpll_mdev_notifier_event(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct mlx5_dpll *mdpll = container_of(nb, struct mlx5_dpll, mdev_nb);
	struct net_device *netdev = data;

	switch (event) {
	case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
		if (netdev)
			mlx5_dpll_netdev_dpll_pin_set(mdpll, netdev);
		else
			mlx5_dpll_netdev_dpll_pin_clear(mdpll);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static void mlx5_dpll_mdev_netdev_track(struct mlx5_dpll *mdpll,
					struct mlx5_core_dev *mdev)
{
	mdpll->mdev_nb.notifier_call = mlx5_dpll_mdev_notifier_event;
	mlx5_blocking_notifier_register(mdev, &mdpll->mdev_nb);
	mlx5_core_uplink_netdev_event_replay(mdev);
}

static void mlx5_dpll_mdev_netdev_untrack(struct mlx5_dpll *mdpll,
					  struct mlx5_core_dev *mdev)
{
	mlx5_blocking_notifier_unregister(mdev, &mdpll->mdev_nb);
	mlx5_dpll_netdev_dpll_pin_clear(mdpll);
}

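/* Auxiliary device probe: put the hardware into FREE_RUNNING, read the
 * clock identity, get and register the (possibly shared) DPLL device and
 * pin, start tracking the uplink netdev and kick off the periodic polling.
 */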
static int mlx5_dpll_probe(struct auxiliary_device *adev,
			   const struct auxiliary_device_id *id)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = edev->mdev;
	struct mlx5_dpll *mdpll;
	u64 clock_id;
	int err;

	err = mlx5_dpll_synce_status_set(mdev,
					 MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
	if (err)
		return err;

	err = mlx5_dpll_clock_id_get(mdev, &clock_id);
	if (err)
		return err;

	mdpll = kzalloc(sizeof(*mdpll), GFP_KERNEL);
	if (!mdpll)
		return -ENOMEM;
	mdpll->mdev = mdev;
	auxiliary_set_drvdata(adev, mdpll);

	/* Multiple mdev instances might share one DPLL device. */
	mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
	if (IS_ERR(mdpll->dpll)) {
		err = PTR_ERR(mdpll->dpll);
		goto err_free_mdpll;
	}

	err = dpll_device_register(mdpll->dpll, DPLL_TYPE_EEC,
				   &mlx5_dpll_device_ops, mdpll);
	if (err)
		goto err_put_dpll_device;

	/* Multiple mdev instances might share one DPLL pin. */
	mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev),
				       THIS_MODULE, &mlx5_dpll_pin_properties);
	if (IS_ERR(mdpll->dpll_pin)) {
		err = PTR_ERR(mdpll->dpll_pin);
		goto err_unregister_dpll_device;
	}

	err = dpll_pin_register(mdpll->dpll, mdpll->dpll_pin,
				&mlx5_dpll_pins_ops, mdpll);
	if (err)
		goto err_put_dpll_pin;

	mdpll->wq = create_singlethread_workqueue("mlx5_dpll");
	if (!mdpll->wq) {
		err = -ENOMEM;
		goto err_unregister_dpll_pin;
	}

	mlx5_dpll_mdev_netdev_track(mdpll, mdev);

	INIT_DELAYED_WORK(&mdpll->work, &mlx5_dpll_periodic_work);
	mlx5_dpll_periodic_work_queue(mdpll);

	return 0;

err_unregister_dpll_pin:
	dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
			    &mlx5_dpll_pins_ops, mdpll);
err_put_dpll_pin:
	dpll_pin_put(mdpll->dpll_pin);
err_unregister_dpll_device:
	dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
err_put_dpll_device:
	dpll_device_put(mdpll->dpll);
err_free_mdpll:
	kfree(mdpll);
	return err;
}

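/* Tear down in reverse order of probe and leave the hardware in the
 * FREE_RUNNING state.
 */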
static void mlx5_dpll_remove(struct auxiliary_device *adev)
{
	struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
	struct mlx5_core_dev *mdev = mdpll->mdev;

	cancel_delayed_work_sync(&mdpll->work);
	mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
	destroy_workqueue(mdpll->wq);
	dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
			    &mlx5_dpll_pins_ops, mdpll);
	dpll_pin_put(mdpll->dpll_pin);
	dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
	dpll_device_put(mdpll->dpll);
	kfree(mdpll);

	mlx5_dpll_synce_status_set(mdev,
				   MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
}

static int mlx5_dpll_suspend(struct auxiliary_device *adev, pm_message_t state)
{
	return 0;
}

static int mlx5_dpll_resume(struct auxiliary_device *adev)
{
	return 0;
}

static const struct auxiliary_device_id mlx5_dpll_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".dpll", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5_dpll_id_table);

static struct auxiliary_driver mlx5_dpll_driver = {
	.name = "dpll",
	.probe = mlx5_dpll_probe,
	.remove = mlx5_dpll_remove,
	.suspend = mlx5_dpll_suspend,
	.resume = mlx5_dpll_resume,
	.id_table = mlx5_dpll_id_table,
};

static int __init mlx5_dpll_init(void)
{
	return auxiliary_driver_register(&mlx5_dpll_driver);
}

static void __exit mlx5_dpll_exit(void)
{
	auxiliary_driver_unregister(&mlx5_dpll_driver);
}

module_init(mlx5_dpll_init);
module_exit(mlx5_dpll_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@nvidia.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) DPLL driver");
MODULE_LICENSE("Dual BSD/GPL");


source code of linux/drivers/net/ethernet/mellanox/mlx5/core/dpll.c