1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Management Engine (FME)
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Joseph Grecco <joe.grecco@intel.com>
11 * Enno Luebbers <enno.luebbers@intel.com>
12 * Tim Whisonant <tim.whisonant@intel.com>
13 * Ananda Ravuri <ananda.ravuri@intel.com>
14 * Henry Mitchel <henry.mitchel@intel.com>
15 */
16
17#include <linux/hwmon.h>
18#include <linux/hwmon-sysfs.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/uaccess.h>
22#include <linux/units.h>
23#include <linux/fpga-dfl.h>
24
25#include "dfl.h"
26#include "dfl-fme.h"
27
28static ssize_t ports_num_show(struct device *dev,
29 struct device_attribute *attr, char *buf)
30{
31 void __iomem *base;
32 u64 v;
33
34 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
35
36 v = readq(addr: base + FME_HDR_CAP);
37
38 return scnprintf(buf, PAGE_SIZE, fmt: "%u\n",
39 (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
40}
41static DEVICE_ATTR_RO(ports_num);
42
43/*
44 * Bitstream (static FPGA region) identifier number. It contains the
45 * detailed version and other information of this static FPGA region.
46 */
47static ssize_t bitstream_id_show(struct device *dev,
48 struct device_attribute *attr, char *buf)
49{
50 void __iomem *base;
51 u64 v;
52
53 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
54
55 v = readq(addr: base + FME_HDR_BITSTREAM_ID);
56
57 return scnprintf(buf, PAGE_SIZE, fmt: "0x%llx\n", (unsigned long long)v);
58}
59static DEVICE_ATTR_RO(bitstream_id);
60
61/*
62 * Bitstream (static FPGA region) meta data. It contains the synthesis
63 * date, seed and other information of this static FPGA region.
64 */
65static ssize_t bitstream_metadata_show(struct device *dev,
66 struct device_attribute *attr, char *buf)
67{
68 void __iomem *base;
69 u64 v;
70
71 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
72
73 v = readq(addr: base + FME_HDR_BITSTREAM_MD);
74
75 return scnprintf(buf, PAGE_SIZE, fmt: "0x%llx\n", (unsigned long long)v);
76}
77static DEVICE_ATTR_RO(bitstream_metadata);
78
79static ssize_t cache_size_show(struct device *dev,
80 struct device_attribute *attr, char *buf)
81{
82 void __iomem *base;
83 u64 v;
84
85 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
86
87 v = readq(addr: base + FME_HDR_CAP);
88
89 return sprintf(buf, fmt: "%u\n",
90 (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
91}
92static DEVICE_ATTR_RO(cache_size);
93
94static ssize_t fabric_version_show(struct device *dev,
95 struct device_attribute *attr, char *buf)
96{
97 void __iomem *base;
98 u64 v;
99
100 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
101
102 v = readq(addr: base + FME_HDR_CAP);
103
104 return sprintf(buf, fmt: "%u\n",
105 (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
106}
107static DEVICE_ATTR_RO(fabric_version);
108
109static ssize_t socket_id_show(struct device *dev,
110 struct device_attribute *attr, char *buf)
111{
112 void __iomem *base;
113 u64 v;
114
115 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
116
117 v = readq(addr: base + FME_HDR_CAP);
118
119 return sprintf(buf, fmt: "%u\n",
120 (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
121}
122static DEVICE_ATTR_RO(socket_id);
123
/* sysfs attributes exposed by the FME header feature (all read-only). */
static struct attribute *fme_hdr_attrs[] = {
	&dev_attr_ports_num.attr,
	&dev_attr_bitstream_id.attr,
	&dev_attr_bitstream_metadata.attr,
	&dev_attr_cache_size.attr,
	&dev_attr_fabric_version.attr,
	&dev_attr_socket_id.attr,
	NULL,
};

/* attribute group published via fme_dev_groups below. */
static const struct attribute_group fme_hdr_group = {
	.attrs = fme_hdr_attrs,
};
137
138static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
139 unsigned long arg)
140{
141 struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
142 int port_id;
143
144 if (get_user(port_id, (int __user *)arg))
145 return -EFAULT;
146
147 return dfl_fpga_cdev_release_port(cdev, port_id);
148}
149
150static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
151 unsigned long arg)
152{
153 struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
154 int port_id;
155
156 if (get_user(port_id, (int __user *)arg))
157 return -EFAULT;
158
159 return dfl_fpga_cdev_assign_port(cdev, port_id);
160}
161
162static long fme_hdr_ioctl(struct platform_device *pdev,
163 struct dfl_feature *feature,
164 unsigned int cmd, unsigned long arg)
165{
166 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev: &pdev->dev);
167
168 switch (cmd) {
169 case DFL_FPGA_FME_PORT_RELEASE:
170 return fme_hdr_ioctl_release_port(pdata, arg);
171 case DFL_FPGA_FME_PORT_ASSIGN:
172 return fme_hdr_ioctl_assign_port(pdata, arg);
173 }
174
175 return -ENODEV;
176}
177
/* feature id match table for the FME header sub-feature. */
static const struct dfl_feature_id fme_hdr_id_table[] = {
	{.id = FME_FEATURE_ID_HEADER,},
	{0,}
};

/* header sub-feature only implements ioctl handling. */
static const struct dfl_feature_ops fme_hdr_ops = {
	.ioctl = fme_hdr_ioctl,
};
186
/* FME thermal management feature register layout. */
#define FME_THERM_THRESHOLD	0x8
#define TEMP_THRESHOLD1		GENMASK_ULL(6, 0)
#define TEMP_THRESHOLD1_EN	BIT_ULL(7)
#define TEMP_THRESHOLD2		GENMASK_ULL(14, 8)
#define TEMP_THRESHOLD2_EN	BIT_ULL(15)
#define TRIP_THRESHOLD		GENMASK_ULL(30, 24)
#define TEMP_THRESHOLD1_STATUS	BIT_ULL(32)	/* threshold1 reached */
#define TEMP_THRESHOLD2_STATUS	BIT_ULL(33)	/* threshold2 reached */
/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
#define TEMP_THRESHOLD1_POLICY	BIT_ULL(44)

#define FME_THERM_RDSENSOR_FMT1	0x10
#define FPGA_TEMPERATURE	GENMASK_ULL(6, 0)

#define FME_THERM_CAP		0x20
#define THERM_NO_THROTTLE	BIT_ULL(0)
205
206static bool fme_thermal_throttle_support(void __iomem *base)
207{
208 u64 v = readq(addr: base + FME_THERM_CAP);
209
210 return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
211}
212
213static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
214 enum hwmon_sensor_types type,
215 u32 attr, int channel)
216{
217 const struct dfl_feature *feature = drvdata;
218
219 /* temperature is always supported, and check hardware cap for others */
220 if (attr == hwmon_temp_input)
221 return 0444;
222
223 return fme_thermal_throttle_support(base: feature->ioaddr) ? 0444 : 0;
224}
225
226static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
227 u32 attr, int channel, long *val)
228{
229 struct dfl_feature *feature = dev_get_drvdata(dev);
230 u64 v;
231
232 switch (attr) {
233 case hwmon_temp_input:
234 v = readq(addr: feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
235 *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * MILLI);
236 break;
237 case hwmon_temp_max:
238 v = readq(addr: feature->ioaddr + FME_THERM_THRESHOLD);
239 *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * MILLI);
240 break;
241 case hwmon_temp_crit:
242 v = readq(addr: feature->ioaddr + FME_THERM_THRESHOLD);
243 *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * MILLI);
244 break;
245 case hwmon_temp_emergency:
246 v = readq(addr: feature->ioaddr + FME_THERM_THRESHOLD);
247 *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * MILLI);
248 break;
249 case hwmon_temp_max_alarm:
250 v = readq(addr: feature->ioaddr + FME_THERM_THRESHOLD);
251 *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
252 break;
253 case hwmon_temp_crit_alarm:
254 v = readq(addr: feature->ioaddr + FME_THERM_THRESHOLD);
255 *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
256 break;
257 default:
258 return -EOPNOTSUPP;
259 }
260
261 return 0;
262}
263
/* read-only hwmon ops; writability is never granted by the visible callback. */
static const struct hwmon_ops thermal_hwmon_ops = {
	.is_visible = thermal_hwmon_attrs_visible,
	.read = thermal_hwmon_read,
};

/* single temperature channel with threshold/alarm attributes. */
static const struct hwmon_channel_info * const thermal_hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
				 HWMON_T_MAX   | HWMON_T_MAX_ALARM |
				 HWMON_T_CRIT  | HWMON_T_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info thermal_hwmon_chip_info = {
	.ops = &thermal_hwmon_ops,
	.info = thermal_hwmon_info,
};
280
281static ssize_t temp1_max_policy_show(struct device *dev,
282 struct device_attribute *attr, char *buf)
283{
284 struct dfl_feature *feature = dev_get_drvdata(dev);
285 u64 v;
286
287 v = readq(addr: feature->ioaddr + FME_THERM_THRESHOLD);
288
289 return sprintf(buf, fmt: "%u\n",
290 (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
291}
292
293static DEVICE_ATTR_RO(temp1_max_policy);
294
/* extra (non-standard-hwmon) thermal sysfs attributes. */
static struct attribute *thermal_extra_attrs[] = {
	&dev_attr_temp1_max_policy.attr,
	NULL,
};
299
300static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
301 struct attribute *attr, int index)
302{
303 struct device *dev = kobj_to_dev(kobj);
304 struct dfl_feature *feature = dev_get_drvdata(dev);
305
306 return fme_thermal_throttle_support(base: feature->ioaddr) ? attr->mode : 0;
307}
308
/* group for the extra attributes, gated by the visibility callback above. */
static const struct attribute_group thermal_extra_group = {
	.attrs		= thermal_extra_attrs,
	.is_visible	= thermal_extra_attrs_visible,
};
__ATTRIBUTE_GROUPS(thermal_extra);
314
315static int fme_thermal_mgmt_init(struct platform_device *pdev,
316 struct dfl_feature *feature)
317{
318 struct device *hwmon;
319
320 /*
321 * create hwmon to allow userspace monitoring temperature and other
322 * threshold information.
323 *
324 * temp1_input -> FPGA device temperature
325 * temp1_max -> hardware threshold 1 -> 50% or 90% throttling
326 * temp1_crit -> hardware threshold 2 -> 100% throttling
327 * temp1_emergency -> hardware trip_threshold to shutdown FPGA
328 * temp1_max_alarm -> hardware threshold 1 alarm
329 * temp1_crit_alarm -> hardware threshold 2 alarm
330 *
331 * create device specific sysfs interfaces, e.g. read temp1_max_policy
332 * to understand the actual hardware throttling action (50% vs 90%).
333 *
334 * If hardware doesn't support automatic throttling per thresholds,
335 * then all above sysfs interfaces are not visible except temp1_input
336 * for temperature.
337 */
338 hwmon = devm_hwmon_device_register_with_info(dev: &pdev->dev,
339 name: "dfl_fme_thermal", drvdata: feature,
340 info: &thermal_hwmon_chip_info,
341 extra_groups: thermal_extra_groups);
342 if (IS_ERR(ptr: hwmon)) {
343 dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
344 return PTR_ERR(ptr: hwmon);
345 }
346
347 return 0;
348}
349
/* feature id match table for the thermal management sub-feature. */
static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_THERMAL_MGMT,},
	{0,}
};

/* thermal sub-feature only needs an init hook (hwmon is devm-managed). */
static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
	.init = fme_thermal_mgmt_init,
};
358
/* FME power management feature register layout. */
#define FME_PWR_STATUS		0x8
#define FME_LATENCY_TOLERANCE	BIT_ULL(18)
#define PWR_CONSUMED		GENMASK_ULL(17, 0)

#define FME_PWR_THRESHOLD	0x10
#define PWR_THRESHOLD1		GENMASK_ULL(6, 0)	/* in Watts */
#define PWR_THRESHOLD2		GENMASK_ULL(14, 8)	/* in Watts */
#define PWR_THRESHOLD_MAX	0x7f			/* in Watts */
#define PWR_THRESHOLD1_STATUS	BIT_ULL(16)
#define PWR_THRESHOLD2_STATUS	BIT_ULL(17)

#define FME_PWR_XEON_LIMIT	0x18
#define XEON_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
#define XEON_PWR_EN		BIT_ULL(15)
#define FME_PWR_FPGA_LIMIT	0x20
#define FPGA_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
#define FPGA_PWR_EN		BIT_ULL(15)
376
377static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
378 u32 attr, int channel, long *val)
379{
380 struct dfl_feature *feature = dev_get_drvdata(dev);
381 u64 v;
382
383 switch (attr) {
384 case hwmon_power_input:
385 v = readq(addr: feature->ioaddr + FME_PWR_STATUS);
386 *val = (long)(FIELD_GET(PWR_CONSUMED, v) * MICRO);
387 break;
388 case hwmon_power_max:
389 v = readq(addr: feature->ioaddr + FME_PWR_THRESHOLD);
390 *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * MICRO);
391 break;
392 case hwmon_power_crit:
393 v = readq(addr: feature->ioaddr + FME_PWR_THRESHOLD);
394 *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * MICRO);
395 break;
396 case hwmon_power_max_alarm:
397 v = readq(addr: feature->ioaddr + FME_PWR_THRESHOLD);
398 *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
399 break;
400 case hwmon_power_crit_alarm:
401 v = readq(addr: feature->ioaddr + FME_PWR_THRESHOLD);
402 *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
403 break;
404 default:
405 return -EOPNOTSUPP;
406 }
407
408 return 0;
409}
410
411static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
412 u32 attr, int channel, long val)
413{
414 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev: dev->parent);
415 struct dfl_feature *feature = dev_get_drvdata(dev);
416 int ret = 0;
417 u64 v;
418
419 val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);
420
421 mutex_lock(&pdata->lock);
422
423 switch (attr) {
424 case hwmon_power_max:
425 v = readq(addr: feature->ioaddr + FME_PWR_THRESHOLD);
426 v &= ~PWR_THRESHOLD1;
427 v |= FIELD_PREP(PWR_THRESHOLD1, val);
428 writeq(val: v, addr: feature->ioaddr + FME_PWR_THRESHOLD);
429 break;
430 case hwmon_power_crit:
431 v = readq(addr: feature->ioaddr + FME_PWR_THRESHOLD);
432 v &= ~PWR_THRESHOLD2;
433 v |= FIELD_PREP(PWR_THRESHOLD2, val);
434 writeq(val: v, addr: feature->ioaddr + FME_PWR_THRESHOLD);
435 break;
436 default:
437 ret = -EOPNOTSUPP;
438 break;
439 }
440
441 mutex_unlock(lock: &pdata->lock);
442
443 return ret;
444}
445
446static umode_t power_hwmon_attrs_visible(const void *drvdata,
447 enum hwmon_sensor_types type,
448 u32 attr, int channel)
449{
450 switch (attr) {
451 case hwmon_power_input:
452 case hwmon_power_max_alarm:
453 case hwmon_power_crit_alarm:
454 return 0444;
455 case hwmon_power_max:
456 case hwmon_power_crit:
457 return 0644;
458 }
459
460 return 0;
461}
462
/* power feature supports both read and write (thresholds) via hwmon. */
static const struct hwmon_ops power_hwmon_ops = {
	.is_visible = power_hwmon_attrs_visible,
	.read = power_hwmon_read,
	.write = power_hwmon_write,
};

/* single power channel with threshold/alarm attributes. */
static const struct hwmon_channel_info * const power_hwmon_info[] = {
	HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
				  HWMON_P_MAX  | HWMON_P_MAX_ALARM |
				  HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info power_hwmon_chip_info = {
	.ops = &power_hwmon_ops,
	.info = power_hwmon_info,
};
480
481static ssize_t power1_xeon_limit_show(struct device *dev,
482 struct device_attribute *attr, char *buf)
483{
484 struct dfl_feature *feature = dev_get_drvdata(dev);
485 u16 xeon_limit = 0;
486 u64 v;
487
488 v = readq(addr: feature->ioaddr + FME_PWR_XEON_LIMIT);
489
490 if (FIELD_GET(XEON_PWR_EN, v))
491 xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
492
493 return sprintf(buf, fmt: "%u\n", xeon_limit * 100000);
494}
495
496static ssize_t power1_fpga_limit_show(struct device *dev,
497 struct device_attribute *attr, char *buf)
498{
499 struct dfl_feature *feature = dev_get_drvdata(dev);
500 u16 fpga_limit = 0;
501 u64 v;
502
503 v = readq(addr: feature->ioaddr + FME_PWR_FPGA_LIMIT);
504
505 if (FIELD_GET(FPGA_PWR_EN, v))
506 fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
507
508 return sprintf(buf, fmt: "%u\n", fpga_limit * 100000);
509}
510
511static ssize_t power1_ltr_show(struct device *dev,
512 struct device_attribute *attr, char *buf)
513{
514 struct dfl_feature *feature = dev_get_drvdata(dev);
515 u64 v;
516
517 v = readq(addr: feature->ioaddr + FME_PWR_STATUS);
518
519 return sprintf(buf, fmt: "%u\n",
520 (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
521}
522
static DEVICE_ATTR_RO(power1_xeon_limit);
static DEVICE_ATTR_RO(power1_fpga_limit);
static DEVICE_ATTR_RO(power1_ltr);

/* extra (non-standard-hwmon) power sysfs attributes, always visible. */
static struct attribute *power_extra_attrs[] = {
	&dev_attr_power1_xeon_limit.attr,
	&dev_attr_power1_fpga_limit.attr,
	&dev_attr_power1_ltr.attr,
	NULL
};

ATTRIBUTE_GROUPS(power_extra);
535
536static int fme_power_mgmt_init(struct platform_device *pdev,
537 struct dfl_feature *feature)
538{
539 struct device *hwmon;
540
541 hwmon = devm_hwmon_device_register_with_info(dev: &pdev->dev,
542 name: "dfl_fme_power", drvdata: feature,
543 info: &power_hwmon_chip_info,
544 extra_groups: power_extra_groups);
545 if (IS_ERR(ptr: hwmon)) {
546 dev_err(&pdev->dev, "Fail to register power hwmon\n");
547 return PTR_ERR(ptr: hwmon);
548 }
549
550 return 0;
551}
552
/* feature id match table for the power management sub-feature. */
static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_POWER_MGMT,},
	{0,}
};

/* power sub-feature only needs an init hook (hwmon is devm-managed). */
static const struct dfl_feature_ops fme_power_mgmt_ops = {
	.init = fme_power_mgmt_init,
};
561
/*
 * All sub-feature drivers handled by this FME driver; the array is
 * terminated by an entry with NULL ops.
 */
static struct dfl_feature_driver fme_feature_drvs[] = {
	{
		.id_table = fme_hdr_id_table,
		.ops = &fme_hdr_ops,
	},
	{
		.id_table = fme_pr_mgmt_id_table,
		.ops = &fme_pr_mgmt_ops,
	},
	{
		.id_table = fme_global_err_id_table,
		.ops = &fme_global_err_ops,
	},
	{
		.id_table = fme_thermal_mgmt_id_table,
		.ops = &fme_thermal_mgmt_ops,
	},
	{
		.id_table = fme_power_mgmt_id_table,
		.ops = &fme_power_mgmt_ops,
	},
	{
		.id_table = fme_perf_id_table,
		.ops = &fme_perf_ops,
	},
	{
		.ops = NULL,
	},
};
591
/*
 * DFL_FPGA_CHECK_EXTENSION handler: no FME ioctl extensions are defined
 * yet, so always report 0 (extension unsupported).
 */
static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}
598
599static int fme_open(struct inode *inode, struct file *filp)
600{
601 struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
602 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev: &fdev->dev);
603 int ret;
604
605 if (WARN_ON(!pdata))
606 return -ENODEV;
607
608 mutex_lock(&pdata->lock);
609 ret = dfl_feature_dev_use_begin(pdata, excl: filp->f_flags & O_EXCL);
610 if (!ret) {
611 dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
612 dfl_feature_dev_use_count(pdata));
613 filp->private_data = pdata;
614 }
615 mutex_unlock(lock: &pdata->lock);
616
617 return ret;
618}
619
620static int fme_release(struct inode *inode, struct file *filp)
621{
622 struct dfl_feature_platform_data *pdata = filp->private_data;
623 struct platform_device *pdev = pdata->dev;
624 struct dfl_feature *feature;
625
626 dev_dbg(&pdev->dev, "Device File Release\n");
627
628 mutex_lock(&pdata->lock);
629 dfl_feature_dev_use_end(pdata);
630
631 if (!dfl_feature_dev_use_count(pdata))
632 dfl_fpga_dev_for_each_feature(pdata, feature)
633 dfl_fpga_set_irq_triggers(feature, start: 0,
634 count: feature->nr_irqs, NULL);
635 mutex_unlock(lock: &pdata->lock);
636
637 return 0;
638}
639
640static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
641{
642 struct dfl_feature_platform_data *pdata = filp->private_data;
643 struct platform_device *pdev = pdata->dev;
644 struct dfl_feature *f;
645 long ret;
646
647 dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
648
649 switch (cmd) {
650 case DFL_FPGA_GET_API_VERSION:
651 return DFL_FPGA_API_VERSION;
652 case DFL_FPGA_CHECK_EXTENSION:
653 return fme_ioctl_check_extension(pdata, arg);
654 default:
655 /*
656 * Let sub-feature's ioctl function to handle the cmd.
657 * Sub-feature's ioctl returns -ENODEV when cmd is not
658 * handled in this sub feature, and returns 0 or other
659 * error code if cmd is handled.
660 */
661 dfl_fpga_dev_for_each_feature(pdata, f) {
662 if (f->ops && f->ops->ioctl) {
663 ret = f->ops->ioctl(pdev, f, cmd, arg);
664 if (ret != -ENODEV)
665 return ret;
666 }
667 }
668 }
669
670 return -EINVAL;
671}
672
673static int fme_dev_init(struct platform_device *pdev)
674{
675 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev: &pdev->dev);
676 struct dfl_fme *fme;
677
678 fme = devm_kzalloc(dev: &pdev->dev, size: sizeof(*fme), GFP_KERNEL);
679 if (!fme)
680 return -ENOMEM;
681
682 fme->pdata = pdata;
683
684 mutex_lock(&pdata->lock);
685 dfl_fpga_pdata_set_private(pdata, private: fme);
686 mutex_unlock(lock: &pdata->lock);
687
688 return 0;
689}
690
691static void fme_dev_destroy(struct platform_device *pdev)
692{
693 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev: &pdev->dev);
694
695 mutex_lock(&pdata->lock);
696 dfl_fpga_pdata_set_private(pdata, NULL);
697 mutex_unlock(lock: &pdata->lock);
698}
699
/* char device file operations registered in fme_probe(). */
static const struct file_operations fme_fops = {
	.owner		= THIS_MODULE,
	.open		= fme_open,
	.release	= fme_release,
	.unlocked_ioctl = fme_ioctl,
};
706
707static int fme_probe(struct platform_device *pdev)
708{
709 int ret;
710
711 ret = fme_dev_init(pdev);
712 if (ret)
713 goto exit;
714
715 ret = dfl_fpga_dev_feature_init(pdev, feature_drvs: fme_feature_drvs);
716 if (ret)
717 goto dev_destroy;
718
719 ret = dfl_fpga_dev_ops_register(pdev, fops: &fme_fops, THIS_MODULE);
720 if (ret)
721 goto feature_uinit;
722
723 return 0;
724
725feature_uinit:
726 dfl_fpga_dev_feature_uinit(pdev);
727dev_destroy:
728 fme_dev_destroy(pdev);
729exit:
730 return ret;
731}
732
/* remove: tear down in exact reverse order of fme_probe(). */
static int fme_remove(struct platform_device *pdev)
{
	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	fme_dev_destroy(pdev);

	return 0;
}
741
/* device-wide sysfs groups: FME header attrs plus global error attrs. */
static const struct attribute_group *fme_dev_groups[] = {
	&fme_hdr_group,
	&fme_global_err_group,
	NULL
};

static struct platform_driver fme_driver = {
	.driver	= {
		.name       = DFL_FPGA_FEATURE_DEV_FME,
		.dev_groups = fme_dev_groups,
	},
	.probe   = fme_probe,
	.remove  = fme_remove,
};

module_platform_driver(fme_driver);

MODULE_DESCRIPTION("FPGA Management Engine driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme");
763

source code of linux/drivers/fpga/dfl-fme-main.c