1// SPDX-License-Identifier: GPL-2.0
2/*
3 * MSI framework for platform devices
4 *
5 * Copyright (C) 2015 ARM Limited, All Rights Reserved.
6 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 */
8
9#include <linux/device.h>
10#include <linux/idr.h>
11#include <linux/irq.h>
12#include <linux/irqdomain.h>
13#include <linux/msi.h>
14#include <linux/slab.h>
15
16#define DEV_ID_SHIFT 21
17#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
18
19/*
20 * Internal data structure containing a (made up, but unique) devid
21 * and the callback to write the MSI message.
22 */
struct platform_msi_priv_data {
	struct device *dev;		/* device owning these MSIs */
	void *host_data;		/* opaque data for device domains */
	msi_alloc_info_t arg;		/* cached alloc info from msi_domain_prepare_irqs() */
	irq_write_msi_msg_t write_msg;	/* driver callback to write an MSI message */
	int devid;			/* unique id from platform_msi_devid_ida */
};
30
31/* The devid allocator */
32static DEFINE_IDA(platform_msi_devid_ida);
33
34#ifdef GENERIC_MSI_DOMAIN_OPS
35/*
 * Convert an msi_desc to a globally unique identifier (per-device
37 * devid + msi_desc position in the msi_list).
38 */
39static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
40{
41 u32 devid;
42
43 devid = desc->platform.msi_priv_data->devid;
44
45 return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
46}
47
48static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
49{
50 arg->desc = desc;
51 arg->hwirq = platform_msi_calc_hwirq(desc);
52}
53
54static int platform_msi_init(struct irq_domain *domain,
55 struct msi_domain_info *info,
56 unsigned int virq, irq_hw_number_t hwirq,
57 msi_alloc_info_t *arg)
58{
59 return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
60 info->chip, info->chip_data);
61}
62#else
63#define platform_msi_set_desc NULL
64#define platform_msi_init NULL
65#endif
66
67static void platform_msi_update_dom_ops(struct msi_domain_info *info)
68{
69 struct msi_domain_ops *ops = info->ops;
70
71 BUG_ON(!ops);
72
73 if (ops->msi_init == NULL)
74 ops->msi_init = platform_msi_init;
75 if (ops->set_desc == NULL)
76 ops->set_desc = platform_msi_set_desc;
77}
78
79static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
80{
81 struct msi_desc *desc = irq_data_get_msi_desc(data);
82 struct platform_msi_priv_data *priv_data;
83
84 priv_data = desc->platform.msi_priv_data;
85
86 priv_data->write_msg(desc, msg);
87}
88
/*
 * Fill in default irq_chip callbacks (all forwarding to the parent
 * domain) for any the caller left unset, and sanity-check the
 * level-triggered MSI capability.
 */
static void platform_msi_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip);
	if (!chip->irq_mask)
		chip->irq_mask = irq_chip_mask_parent;
	if (!chip->irq_unmask)
		chip->irq_unmask = irq_chip_unmask_parent;
	if (!chip->irq_eoi)
		chip->irq_eoi = irq_chip_eoi_parent;
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
	if (!chip->irq_write_msi_msg)
		chip->irq_write_msi_msg = platform_msi_write_msg;
	/*
	 * Advertising MSI_FLAG_LEVEL_CAPABLE without chip support would
	 * break level-triggered users: warn and drop the capability.
	 */
	if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		    !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
		info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}
108
109static void platform_msi_free_descs(struct device *dev, int base, int nvec)
110{
111 struct msi_desc *desc, *tmp;
112
113 list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
114 if (desc->platform.msi_index >= base &&
115 desc->platform.msi_index < (base + nvec)) {
116 list_del(&desc->list);
117 free_msi_entry(desc);
118 }
119 }
120}
121
122static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
123 int nvec,
124 struct platform_msi_priv_data *data)
125
126{
127 struct msi_desc *desc;
128 int i, base = 0;
129
130 if (!list_empty(dev_to_msi_list(dev))) {
131 desc = list_last_entry(dev_to_msi_list(dev),
132 struct msi_desc, list);
133 base = desc->platform.msi_index + 1;
134 }
135
136 for (i = 0; i < nvec; i++) {
137 desc = alloc_msi_entry(dev, 1, NULL);
138 if (!desc)
139 break;
140
141 desc->platform.msi_priv_data = data;
142 desc->platform.msi_index = base + i;
143 desc->irq = virq ? virq + i : 0;
144
145 list_add_tail(&desc->list, dev_to_msi_list(dev));
146 }
147
148 if (i != nvec) {
149 /* Clean up the mess */
150 platform_msi_free_descs(dev, base, nvec);
151
152 return -ENOMEM;
153 }
154
155 return 0;
156}
157
/* Allocate nvec descriptors for dev without binding them to Linux irqs */
static int platform_msi_alloc_descs(struct device *dev, int nvec,
				    struct platform_msi_priv_data *data)

{
	return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
}
164
165/**
166 * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
167 * @fwnode: Optional fwnode of the interrupt controller
168 * @info: MSI domain info
169 * @parent: Parent irq domain
170 *
171 * Updates the domain and chip ops and creates a platform MSI
172 * interrupt domain.
173 *
174 * Returns:
175 * A domain pointer or NULL in case of failure.
176 */
177struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
178 struct msi_domain_info *info,
179 struct irq_domain *parent)
180{
181 struct irq_domain *domain;
182
183 if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
184 platform_msi_update_dom_ops(info);
185 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
186 platform_msi_update_chip_ops(info);
187
188 domain = msi_create_irq_domain(fwnode, info, parent);
189 if (domain)
190 irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);
191
192 return domain;
193}
194
195static struct platform_msi_priv_data *
196platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
197 irq_write_msi_msg_t write_msi_msg)
198{
199 struct platform_msi_priv_data *datap;
200 /*
201 * Limit the number of interrupts to 2048 per device. Should we
202 * need to bump this up, DEV_ID_SHIFT should be adjusted
203 * accordingly (which would impact the max number of MSI
204 * capable devices).
205 */
206 if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
207 return ERR_PTR(-EINVAL);
208
209 if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
210 dev_err(dev, "Incompatible msi_domain, giving up\n");
211 return ERR_PTR(-EINVAL);
212 }
213
214 /* Already had a helping of MSI? Greed... */
215 if (!list_empty(dev_to_msi_list(dev)))
216 return ERR_PTR(-EBUSY);
217
218 datap = kzalloc(sizeof(*datap), GFP_KERNEL);
219 if (!datap)
220 return ERR_PTR(-ENOMEM);
221
222 datap->devid = ida_simple_get(&platform_msi_devid_ida,
223 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
224 if (datap->devid < 0) {
225 int err = datap->devid;
226 kfree(datap);
227 return ERR_PTR(err);
228 }
229
230 datap->write_msg = write_msi_msg;
231 datap->dev = dev;
232
233 return datap;
234}
235
/* Return the devid to the IDA and free the private data */
static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
{
	ida_simple_remove(&platform_msi_devid_ida, data->devid);
	kfree(data);
}
241
242/**
243 * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
244 * @dev: The device for which to allocate interrupts
245 * @nvec: The number of interrupts to allocate
246 * @write_msi_msg: Callback to write an interrupt message for @dev
247 *
248 * Returns:
249 * Zero for success, or an error code in case of failure
250 */
251int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
252 irq_write_msi_msg_t write_msi_msg)
253{
254 struct platform_msi_priv_data *priv_data;
255 int err;
256
257 priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
258 if (IS_ERR(priv_data))
259 return PTR_ERR(priv_data);
260
261 err = platform_msi_alloc_descs(dev, nvec, priv_data);
262 if (err)
263 goto out_free_priv_data;
264
265 err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
266 if (err)
267 goto out_free_desc;
268
269 return 0;
270
271out_free_desc:
272 platform_msi_free_descs(dev, 0, nvec);
273out_free_priv_data:
274 platform_msi_free_priv_data(priv_data);
275
276 return err;
277}
278EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
279
280/**
281 * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
282 * @dev: The device for which to free interrupts
283 */
void platform_msi_domain_free_irqs(struct device *dev)
{
	/*
	 * The shared private data is reachable through every descriptor;
	 * fetch it from the first entry and release it before the
	 * descriptors themselves are torn down below.
	 */
	if (!list_empty(dev_to_msi_list(dev))) {
		struct msi_desc *desc;

		desc = first_msi_entry(dev);
		platform_msi_free_priv_data(desc->platform.msi_priv_data);
	}

	msi_domain_free_irqs(dev->msi_domain, dev);
	/* MAX_DEV_MSIS bounds any valid msi_index, so this frees them all */
	platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
}
EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
297
298/**
299 * platform_msi_get_host_data - Query the private data associated with
300 * a platform-msi domain
301 * @domain: The platform-msi domain
302 *
303 * Returns the private data provided when calling
304 * platform_msi_create_device_domain.
305 */
306void *platform_msi_get_host_data(struct irq_domain *domain)
307{
308 struct platform_msi_priv_data *data = domain->host_data;
309 return data->host_data;
310}
311
312/**
 * __platform_msi_create_device_domain - Create a platform-msi device domain
314 *
315 * @dev: The device generating the MSIs
316 * @nvec: The number of MSIs that need to be allocated
317 * @write_msi_msg: Callback to write an interrupt message for @dev
318 * @ops: The hierarchy domain operations to use
319 * @host_data: Private data associated to this domain
320 *
321 * Returns an irqdomain for @nvec interrupts
322 */
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct platform_msi_priv_data *data;
	struct irq_domain *domain;
	int err;

	data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
	if (IS_ERR(data))
		return NULL;	/* NOTE: the specific error code is dropped */

	data->host_data = host_data;
	/* Tree domains are unsized (size 0); linear ones cover exactly nvec */
	domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
					     is_tree ? 0 : nvec,
					     dev->fwnode, ops, data);
	if (!domain)
		goto free_priv;

	err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
	if (err)
		goto free_domain;

	return domain;

	/* Unwind in reverse order of construction */
free_domain:
	irq_domain_remove(domain);
free_priv:
	platform_msi_free_priv_data(data);
	return NULL;
}
358
359/**
360 * platform_msi_domain_free - Free interrupts associated with a platform-msi
361 * domain
362 *
363 * @domain: The platform-msi domain
364 * @virq: The base irq from which to perform the free operation
365 * @nvec: How many interrupts to free from @virq
366 */
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec)
{
	struct platform_msi_priv_data *data = domain->host_data;
	struct msi_desc *desc, *tmp;
	for_each_msi_entry_safe(desc, tmp, data->dev) {
		/* Every desc here was created with irq set and nvec_used == 1 */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1))
			return;
		/* Only touch descriptors inside the [virq, virq + nvec) window */
		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		irq_domain_free_irqs_common(domain, desc->irq, 1);
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}
383
384/**
385 * platform_msi_domain_alloc - Allocate interrupts associated with
386 * a platform-msi domain
387 *
388 * @domain: The platform-msi domain
389 * @virq: The base irq from which to perform the allocate operation
 * @nr_irqs: How many interrupts to allocate from @virq
391 *
392 * Return 0 on success, or an error code on failure. Must be called
393 * with irq_domain_mutex held (which can only be done as part of a
394 * top-level interrupt allocation).
395 */
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	struct platform_msi_priv_data *data = domain->host_data;
	int err;

	/* Create descriptors already bound to virq .. virq + nr_irqs - 1 */
	err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
	if (err)
		return err;

	err = msi_domain_populate_irqs(domain->parent, data->dev,
				       virq, nr_irqs, &data->arg);
	if (err)
		/* Roll back the descriptors (and any populated irqs) */
		platform_msi_domain_free(domain, virq, nr_irqs);

	return err;
}
413