// SPDX-License-Identifier: GPL-2.0-only
/*
 * Analog Devices Generic AXI ADC IP core
 * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
 *
 * Copyright 2012-2020 Analog Devices Inc.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include <linux/fpga/adi-axi-common.h>

#include <linux/iio/backend.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>

/*
 * Register definitions:
 * https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
 */

/* ADC controls */

#define ADI_AXI_REG_RSTN                        0x0040
#define ADI_AXI_REG_RSTN_CE_N                   BIT(2)
#define ADI_AXI_REG_RSTN_MMCM_RSTN              BIT(1)
#define ADI_AXI_REG_RSTN_RSTN                   BIT(0)

/* ADC Channel controls */

#define ADI_AXI_REG_CHAN_CTRL(c)                (0x0400 + (c) * 0x40)
#define ADI_AXI_REG_CHAN_CTRL_LB_OWR            BIT(11)
#define ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR        BIT(10)
#define ADI_AXI_REG_CHAN_CTRL_IQCOR_EN          BIT(9)
#define ADI_AXI_REG_CHAN_CTRL_DCFILT_EN         BIT(8)
#define ADI_AXI_REG_CHAN_CTRL_FMT_MASK          GENMASK(6, 4)
#define ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT       BIT(6)
#define ADI_AXI_REG_CHAN_CTRL_FMT_TYPE          BIT(5)
#define ADI_AXI_REG_CHAN_CTRL_FMT_EN            BIT(4)
#define ADI_AXI_REG_CHAN_CTRL_PN_TYPE_OWR       BIT(1)
#define ADI_AXI_REG_CHAN_CTRL_ENABLE            BIT(0)

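/* Default channel setup: format logic enabled, samples sign extended, channel enabled. */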
#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS \
        (ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT | \
         ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
         ADI_AXI_REG_CHAN_CTRL_ENABLE)

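/*
 * Driver state: the MMIO regmap for the core register map and the device
 * used to request the DMA channel for the sample buffer.
 */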
struct adi_axi_adc_state {
        struct regmap *regmap;
        struct device *dev;
};

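/*
 * Bring the core out of reset: release the MMCM (clock manager) reset first,
 * give the clocks time to settle (presumably for the MMCM to lock), then
 * de-assert the core reset as well.
 */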
static int axi_adc_enable(struct iio_backend *back)
{
        struct adi_axi_adc_state *st = iio_backend_get_priv(back);
        int ret;

        ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
                              ADI_AXI_REG_RSTN_MMCM_RSTN);
        if (ret)
                return ret;

        fsleep(10000);
        return regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
                               ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}

static void axi_adc_disable(struct iio_backend *back)
{
        struct adi_axi_adc_state *st = iio_backend_get_priv(back);

        regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
}

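/*
 * Configure the per-channel sample format: enable the format logic, optionally
 * sign extend the samples and select offset binary vs. two's complement
 * encoding.
 */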
static int axi_adc_data_format_set(struct iio_backend *back, unsigned int chan,
                                   const struct iio_backend_data_fmt *data)
{
        struct adi_axi_adc_state *st = iio_backend_get_priv(back);
        u32 val;

        if (!data->enable)
                return regmap_clear_bits(st->regmap,
                                         ADI_AXI_REG_CHAN_CTRL(chan),
                                         ADI_AXI_REG_CHAN_CTRL_FMT_EN);

        val = FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_EN, true);
        if (data->sign_extend)
                val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT, true);
        if (data->type == IIO_BACKEND_OFFSET_BINARY)
                val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_TYPE, true);

        return regmap_update_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
                                  ADI_AXI_REG_CHAN_CTRL_FMT_MASK, val);
}

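/* Enable or disable an individual ADC channel in the core. */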
static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
{
        struct adi_axi_adc_state *st = iio_backend_get_priv(back);

        return regmap_set_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
                               ADI_AXI_REG_CHAN_CTRL_ENABLE);
}

static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
{
        struct adi_axi_adc_state *st = iio_backend_get_priv(back);

        return regmap_clear_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
                                 ADI_AXI_REG_CHAN_CTRL_ENABLE);
}

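/*
 * Request the RX DMA channel (named by the "dma-names" property, defaulting
 * to "rx") and attach a dmaengine buffer to the frontend IIO device.
 */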
static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
                                                 struct iio_dev *indio_dev)
{
        struct adi_axi_adc_state *st = iio_backend_get_priv(back);
        struct iio_buffer *buffer;
        const char *dma_name;
        int ret;

        if (device_property_read_string(st->dev, "dma-names", &dma_name))
                dma_name = "rx";

        buffer = iio_dmaengine_buffer_alloc(st->dev, dma_name);
        if (IS_ERR(buffer)) {
                dev_err(st->dev, "Could not get DMA buffer, %ld\n",
                        PTR_ERR(buffer));
                return ERR_CAST(buffer);
        }

        indio_dev->modes |= INDIO_BUFFER_HARDWARE;
        ret = iio_device_attach_buffer(indio_dev, buffer);
        if (ret)
                return ERR_PTR(ret);

        return buffer;
}

static void axi_adc_free_buffer(struct iio_backend *back,
                                struct iio_buffer *buffer)
{
        iio_dmaengine_buffer_free(buffer);
}

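/* 32-bit registers with a 4-byte stride; max_register bounds the accessible map. */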
static const struct regmap_config axi_adc_regmap_config = {
        .val_bits = 32,
        .reg_bits = 32,
        .reg_stride = 4,
        .max_register = 0x0800,
};

static const struct iio_backend_ops adi_axi_adc_generic = {
        .enable = axi_adc_enable,
        .disable = axi_adc_disable,
        .data_format_set = axi_adc_data_format_set,
        .chan_enable = axi_adc_chan_enable,
        .chan_disable = axi_adc_chan_disable,
        .request_buffer = axi_adc_request_buffer,
        .free_buffer = axi_adc_free_buffer,
};

static int adi_axi_adc_probe(struct platform_device *pdev)
{
        const unsigned int *expected_ver;
        struct adi_axi_adc_state *st;
        void __iomem *base;
        unsigned int ver;
        int ret;

        st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        st->dev = &pdev->dev;
        st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
                                           &axi_adc_regmap_config);
        if (IS_ERR(st->regmap))
                return PTR_ERR(st->regmap);

        expected_ver = device_get_match_data(&pdev->dev);
        if (!expected_ver)
                return -ENODEV;

        /*
         * Force the core into the disabled (reset) state; it is up to the
         * frontend to enable it. Register access still works while the core
         * is held in reset.
         */
        ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
        if (ret)
                return ret;

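        /*
         * Sanity check the synthesized IP core version against the minimum
         * version this driver expects for the matched compatible.
         */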
        ret = regmap_read(st->regmap, ADI_AXI_REG_VERSION, &ver);
        if (ret)
                return ret;

        if (*expected_ver > ver) {
                dev_err(&pdev->dev,
                        "IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
                        ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
                        ADI_AXI_PCORE_VER_MINOR(*expected_ver),
                        ADI_AXI_PCORE_VER_PATCH(*expected_ver),
                        ADI_AXI_PCORE_VER_MAJOR(ver),
                        ADI_AXI_PCORE_VER_MINOR(ver),
                        ADI_AXI_PCORE_VER_PATCH(ver));
                return -ENODEV;
        }

        ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
        if (ret)
                return ret;

        dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
                 ADI_AXI_PCORE_VER_MAJOR(ver),
                 ADI_AXI_PCORE_VER_MINOR(ver),
                 ADI_AXI_PCORE_VER_PATCH(ver));

        return 0;
}

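/* Minimum expected IP core version, encoded as major.minor.patch via ADI_AXI_PCORE_VER(). */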
static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');

/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
        { .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
        { /* end of list */ }
};
MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);

static struct platform_driver adi_axi_adc_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = adi_axi_adc_of_match,
        },
        .probe = adi_axi_adc_probe,
};
module_platform_driver(adi_axi_adc_driver);

MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
MODULE_IMPORT_NS(IIO_BACKEND);
