1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // |
3 | // regmap based irq_chip |
4 | // |
5 | // Copyright 2011 Wolfson Microelectronics plc |
6 | // |
7 | // Author: Mark Brown <broonie@opensource.wolfsonmicro.com> |
8 | |
9 | #include <linux/device.h> |
10 | #include <linux/export.h> |
11 | #include <linux/interrupt.h> |
12 | #include <linux/irq.h> |
13 | #include <linux/irqdomain.h> |
14 | #include <linux/pm_runtime.h> |
15 | #include <linux/regmap.h> |
16 | #include <linux/slab.h> |
17 | |
18 | #include "internal.h" |
19 | |
/* Runtime state for one registered regmap IRQ controller. */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises buffer updates vs. sync_unlock */
	struct irq_chip irq_chip;	/* per-instance copy of the template chip */

	struct regmap *map;		/* register map used for all I/O */
	const struct regmap_irq_chip *chip;	/* static chip description */

	int irq_base;			/* base virq when a legacy domain is used */
	struct irq_domain *domain;	/* domain mapping hwirq -> virq */

	int irq;			/* primary (parent) interrupt */
	int wake_count;			/* pending wake enable/disable delta */

	void *status_reg_buf;		/* raw buffer for bulk status reads */
	unsigned int *main_status_buf;	/* per-register main status values */
	unsigned int *status_buf;	/* per-register decoded status values */
	unsigned int *mask_buf;		/* current mask state, 1 = masked */
	unsigned int *mask_buf_def;	/* all valid mask bits per register */
	unsigned int *wake_buf;		/* wake enable state, mirrors mask_buf */
	unsigned int *type_buf;		/* trigger-type bits (type_in_mask chips) */
	unsigned int *type_buf_def;	/* default trigger-type bits */
	unsigned int **config_buf;	/* [num_config_bases][num_config_regs] */

	unsigned int irq_reg_stride;	/* register index -> address multiplier */

	/* Maps (base, index) to a register address; defaults to linear. */
	unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
				    unsigned int base, int index);

	unsigned int clear_status:1;	/* status must be read-to-clear on sync */
};
50 | |
51 | static inline const |
52 | struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data, |
53 | int irq) |
54 | { |
55 | return &data->chip->irqs[irq]; |
56 | } |
57 | |
58 | static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data) |
59 | { |
60 | struct regmap *map = data->map; |
61 | |
62 | /* |
63 | * While possible that a user-defined ->get_irq_reg() callback might |
64 | * be linear enough to support bulk reads, most of the time it won't. |
65 | * Therefore only allow them if the default callback is being used. |
66 | */ |
67 | return data->irq_reg_stride == 1 && map->reg_stride == 1 && |
68 | data->get_irq_reg == regmap_irq_get_irq_reg_linear && |
69 | !map->use_single_read; |
70 | } |
71 | |
72 | static void regmap_irq_lock(struct irq_data *data) |
73 | { |
74 | struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(d: data); |
75 | |
76 | mutex_lock(&d->lock); |
77 | } |
78 | |
79 | static void regmap_irq_sync_unlock(struct irq_data *data) |
80 | { |
81 | struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(d: data); |
82 | struct regmap *map = d->map; |
83 | int i, j, ret; |
84 | u32 reg; |
85 | u32 val; |
86 | |
87 | if (d->chip->runtime_pm) { |
88 | ret = pm_runtime_get_sync(dev: map->dev); |
89 | if (ret < 0) |
90 | dev_err(map->dev, "IRQ sync failed to resume: %d\n" , |
91 | ret); |
92 | } |
93 | |
94 | if (d->clear_status) { |
95 | for (i = 0; i < d->chip->num_regs; i++) { |
96 | reg = d->get_irq_reg(d, d->chip->status_base, i); |
97 | |
98 | ret = regmap_read(map, reg, val: &val); |
99 | if (ret) |
100 | dev_err(d->map->dev, |
101 | "Failed to clear the interrupt status bits\n" ); |
102 | } |
103 | |
104 | d->clear_status = false; |
105 | } |
106 | |
107 | /* |
108 | * If there's been a change in the mask write it back to the |
109 | * hardware. We rely on the use of the regmap core cache to |
110 | * suppress pointless writes. |
111 | */ |
112 | for (i = 0; i < d->chip->num_regs; i++) { |
113 | if (d->chip->handle_mask_sync) |
114 | d->chip->handle_mask_sync(i, d->mask_buf_def[i], |
115 | d->mask_buf[i], |
116 | d->chip->irq_drv_data); |
117 | |
118 | if (d->chip->mask_base && !d->chip->handle_mask_sync) { |
119 | reg = d->get_irq_reg(d, d->chip->mask_base, i); |
120 | ret = regmap_update_bits(map: d->map, reg, |
121 | mask: d->mask_buf_def[i], |
122 | val: d->mask_buf[i]); |
123 | if (ret) |
124 | dev_err(d->map->dev, "Failed to sync masks in %x\n" , reg); |
125 | } |
126 | |
127 | if (d->chip->unmask_base && !d->chip->handle_mask_sync) { |
128 | reg = d->get_irq_reg(d, d->chip->unmask_base, i); |
129 | ret = regmap_update_bits(map: d->map, reg, |
130 | mask: d->mask_buf_def[i], val: ~d->mask_buf[i]); |
131 | if (ret) |
132 | dev_err(d->map->dev, "Failed to sync masks in %x\n" , |
133 | reg); |
134 | } |
135 | |
136 | reg = d->get_irq_reg(d, d->chip->wake_base, i); |
137 | if (d->wake_buf) { |
138 | if (d->chip->wake_invert) |
139 | ret = regmap_update_bits(map: d->map, reg, |
140 | mask: d->mask_buf_def[i], |
141 | val: ~d->wake_buf[i]); |
142 | else |
143 | ret = regmap_update_bits(map: d->map, reg, |
144 | mask: d->mask_buf_def[i], |
145 | val: d->wake_buf[i]); |
146 | if (ret != 0) |
147 | dev_err(d->map->dev, |
148 | "Failed to sync wakes in %x: %d\n" , |
149 | reg, ret); |
150 | } |
151 | |
152 | if (!d->chip->init_ack_masked) |
153 | continue; |
154 | /* |
155 | * Ack all the masked interrupts unconditionally, |
156 | * OR if there is masked interrupt which hasn't been Acked, |
157 | * it'll be ignored in irq handler, then may introduce irq storm |
158 | */ |
159 | if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) { |
160 | reg = d->get_irq_reg(d, d->chip->ack_base, i); |
161 | |
162 | /* some chips ack by write 0 */ |
163 | if (d->chip->ack_invert) |
164 | ret = regmap_write(map, reg, val: ~d->mask_buf[i]); |
165 | else |
166 | ret = regmap_write(map, reg, val: d->mask_buf[i]); |
167 | if (d->chip->clear_ack) { |
168 | if (d->chip->ack_invert && !ret) |
169 | ret = regmap_write(map, reg, UINT_MAX); |
170 | else if (!ret) |
171 | ret = regmap_write(map, reg, val: 0); |
172 | } |
173 | if (ret != 0) |
174 | dev_err(d->map->dev, "Failed to ack 0x%x: %d\n" , |
175 | reg, ret); |
176 | } |
177 | } |
178 | |
179 | for (i = 0; i < d->chip->num_config_bases; i++) { |
180 | for (j = 0; j < d->chip->num_config_regs; j++) { |
181 | reg = d->get_irq_reg(d, d->chip->config_base[i], j); |
182 | ret = regmap_write(map, reg, val: d->config_buf[i][j]); |
183 | if (ret) |
184 | dev_err(d->map->dev, |
185 | "Failed to write config %x: %d\n" , |
186 | reg, ret); |
187 | } |
188 | } |
189 | |
190 | if (d->chip->runtime_pm) |
191 | pm_runtime_put(dev: map->dev); |
192 | |
193 | /* If we've changed our wakeup count propagate it to the parent */ |
194 | if (d->wake_count < 0) |
195 | for (i = d->wake_count; i < 0; i++) |
196 | irq_set_irq_wake(irq: d->irq, on: 0); |
197 | else if (d->wake_count > 0) |
198 | for (i = 0; i < d->wake_count; i++) |
199 | irq_set_irq_wake(irq: d->irq, on: 1); |
200 | |
201 | d->wake_count = 0; |
202 | |
203 | mutex_unlock(lock: &d->lock); |
204 | } |
205 | |
206 | static void regmap_irq_enable(struct irq_data *data) |
207 | { |
208 | struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(d: data); |
209 | struct regmap *map = d->map; |
210 | const struct regmap_irq *irq_data = irq_to_regmap_irq(data: d, irq: data->hwirq); |
211 | unsigned int reg = irq_data->reg_offset / map->reg_stride; |
212 | unsigned int mask; |
213 | |
214 | /* |
215 | * The type_in_mask flag means that the underlying hardware uses |
216 | * separate mask bits for each interrupt trigger type, but we want |
217 | * to have a single logical interrupt with a configurable type. |
218 | * |
219 | * If the interrupt we're enabling defines any supported types |
220 | * then instead of using the regular mask bits for this interrupt, |
221 | * use the value previously written to the type buffer at the |
222 | * corresponding offset in regmap_irq_set_type(). |
223 | */ |
224 | if (d->chip->type_in_mask && irq_data->type.types_supported) |
225 | mask = d->type_buf[reg] & irq_data->mask; |
226 | else |
227 | mask = irq_data->mask; |
228 | |
229 | if (d->chip->clear_on_unmask) |
230 | d->clear_status = true; |
231 | |
232 | d->mask_buf[reg] &= ~mask; |
233 | } |
234 | |
235 | static void regmap_irq_disable(struct irq_data *data) |
236 | { |
237 | struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(d: data); |
238 | struct regmap *map = d->map; |
239 | const struct regmap_irq *irq_data = irq_to_regmap_irq(data: d, irq: data->hwirq); |
240 | |
241 | d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask; |
242 | } |
243 | |
244 | static int regmap_irq_set_type(struct irq_data *data, unsigned int type) |
245 | { |
246 | struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(d: data); |
247 | struct regmap *map = d->map; |
248 | const struct regmap_irq *irq_data = irq_to_regmap_irq(data: d, irq: data->hwirq); |
249 | int reg, ret; |
250 | const struct regmap_irq_type *t = &irq_data->type; |
251 | |
252 | if ((t->types_supported & type) != type) |
253 | return 0; |
254 | |
255 | reg = t->type_reg_offset / map->reg_stride; |
256 | |
257 | if (d->chip->type_in_mask) { |
258 | ret = regmap_irq_set_type_config_simple(buf: &d->type_buf, type, |
259 | irq_data, idx: reg, irq_drv_data: d->chip->irq_drv_data); |
260 | if (ret) |
261 | return ret; |
262 | } |
263 | |
264 | if (d->chip->set_type_config) { |
265 | ret = d->chip->set_type_config(d->config_buf, type, irq_data, |
266 | reg, d->chip->irq_drv_data); |
267 | if (ret) |
268 | return ret; |
269 | } |
270 | |
271 | return 0; |
272 | } |
273 | |
274 | static int regmap_irq_set_wake(struct irq_data *data, unsigned int on) |
275 | { |
276 | struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(d: data); |
277 | struct regmap *map = d->map; |
278 | const struct regmap_irq *irq_data = irq_to_regmap_irq(data: d, irq: data->hwirq); |
279 | |
280 | if (on) { |
281 | if (d->wake_buf) |
282 | d->wake_buf[irq_data->reg_offset / map->reg_stride] |
283 | &= ~irq_data->mask; |
284 | d->wake_count++; |
285 | } else { |
286 | if (d->wake_buf) |
287 | d->wake_buf[irq_data->reg_offset / map->reg_stride] |
288 | |= irq_data->mask; |
289 | d->wake_count--; |
290 | } |
291 | |
292 | return 0; |
293 | } |
294 | |
/* Template irq_chip copied into each instance (name is filled in later). */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
303 | |
304 | static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, |
305 | unsigned int b) |
306 | { |
307 | const struct regmap_irq_chip *chip = data->chip; |
308 | struct regmap *map = data->map; |
309 | struct regmap_irq_sub_irq_map *subreg; |
310 | unsigned int reg; |
311 | int i, ret = 0; |
312 | |
313 | if (!chip->sub_reg_offsets) { |
314 | reg = data->get_irq_reg(data, chip->status_base, b); |
315 | ret = regmap_read(map, reg, val: &data->status_buf[b]); |
316 | } else { |
317 | /* |
318 | * Note we can't use ->get_irq_reg() here because the offsets |
319 | * in 'subreg' are *not* interchangeable with indices. |
320 | */ |
321 | subreg = &chip->sub_reg_offsets[b]; |
322 | for (i = 0; i < subreg->num_regs; i++) { |
323 | unsigned int offset = subreg->offset[i]; |
324 | unsigned int index = offset / map->reg_stride; |
325 | |
326 | ret = regmap_read(map, reg: chip->status_base + offset, |
327 | val: &data->status_buf[index]); |
328 | if (ret) |
329 | break; |
330 | } |
331 | } |
332 | return ret; |
333 | } |
334 | |
335 | static irqreturn_t regmap_irq_thread(int irq, void *d) |
336 | { |
337 | struct regmap_irq_chip_data *data = d; |
338 | const struct regmap_irq_chip *chip = data->chip; |
339 | struct regmap *map = data->map; |
340 | int ret, i; |
341 | bool handled = false; |
342 | u32 reg; |
343 | |
344 | if (chip->handle_pre_irq) |
345 | chip->handle_pre_irq(chip->irq_drv_data); |
346 | |
347 | if (chip->runtime_pm) { |
348 | ret = pm_runtime_get_sync(dev: map->dev); |
349 | if (ret < 0) { |
350 | dev_err(map->dev, "IRQ thread failed to resume: %d\n" , |
351 | ret); |
352 | goto exit; |
353 | } |
354 | } |
355 | |
356 | /* |
357 | * Read only registers with active IRQs if the chip has 'main status |
358 | * register'. Else read in the statuses, using a single bulk read if |
359 | * possible in order to reduce the I/O overheads. |
360 | */ |
361 | |
362 | if (chip->no_status) { |
363 | /* no status register so default to all active */ |
364 | memset32(s: data->status_buf, GENMASK(31, 0), n: chip->num_regs); |
365 | } else if (chip->num_main_regs) { |
366 | unsigned int max_main_bits; |
367 | unsigned long size; |
368 | |
369 | size = chip->num_regs * sizeof(unsigned int); |
370 | |
371 | max_main_bits = (chip->num_main_status_bits) ? |
372 | chip->num_main_status_bits : chip->num_regs; |
373 | /* Clear the status buf as we don't read all status regs */ |
374 | memset(data->status_buf, 0, size); |
375 | |
376 | /* We could support bulk read for main status registers |
377 | * but I don't expect to see devices with really many main |
378 | * status registers so let's only support single reads for the |
379 | * sake of simplicity. and add bulk reads only if needed |
380 | */ |
381 | for (i = 0; i < chip->num_main_regs; i++) { |
382 | reg = data->get_irq_reg(data, chip->main_status, i); |
383 | ret = regmap_read(map, reg, val: &data->main_status_buf[i]); |
384 | if (ret) { |
385 | dev_err(map->dev, |
386 | "Failed to read IRQ status %d\n" , |
387 | ret); |
388 | goto exit; |
389 | } |
390 | } |
391 | |
392 | /* Read sub registers with active IRQs */ |
393 | for (i = 0; i < chip->num_main_regs; i++) { |
394 | unsigned int b; |
395 | const unsigned long mreg = data->main_status_buf[i]; |
396 | |
397 | for_each_set_bit(b, &mreg, map->format.val_bytes * 8) { |
398 | if (i * map->format.val_bytes * 8 + b > |
399 | max_main_bits) |
400 | break; |
401 | ret = read_sub_irq_data(data, b); |
402 | |
403 | if (ret != 0) { |
404 | dev_err(map->dev, |
405 | "Failed to read IRQ status %d\n" , |
406 | ret); |
407 | goto exit; |
408 | } |
409 | } |
410 | |
411 | } |
412 | } else if (regmap_irq_can_bulk_read_status(data)) { |
413 | |
414 | u8 *buf8 = data->status_reg_buf; |
415 | u16 *buf16 = data->status_reg_buf; |
416 | u32 *buf32 = data->status_reg_buf; |
417 | |
418 | BUG_ON(!data->status_reg_buf); |
419 | |
420 | ret = regmap_bulk_read(map, reg: chip->status_base, |
421 | val: data->status_reg_buf, |
422 | val_count: chip->num_regs); |
423 | if (ret != 0) { |
424 | dev_err(map->dev, "Failed to read IRQ status: %d\n" , |
425 | ret); |
426 | goto exit; |
427 | } |
428 | |
429 | for (i = 0; i < data->chip->num_regs; i++) { |
430 | switch (map->format.val_bytes) { |
431 | case 1: |
432 | data->status_buf[i] = buf8[i]; |
433 | break; |
434 | case 2: |
435 | data->status_buf[i] = buf16[i]; |
436 | break; |
437 | case 4: |
438 | data->status_buf[i] = buf32[i]; |
439 | break; |
440 | default: |
441 | BUG(); |
442 | goto exit; |
443 | } |
444 | } |
445 | |
446 | } else { |
447 | for (i = 0; i < data->chip->num_regs; i++) { |
448 | unsigned int reg = data->get_irq_reg(data, |
449 | data->chip->status_base, i); |
450 | ret = regmap_read(map, reg, val: &data->status_buf[i]); |
451 | |
452 | if (ret != 0) { |
453 | dev_err(map->dev, |
454 | "Failed to read IRQ status: %d\n" , |
455 | ret); |
456 | goto exit; |
457 | } |
458 | } |
459 | } |
460 | |
461 | if (chip->status_invert) |
462 | for (i = 0; i < data->chip->num_regs; i++) |
463 | data->status_buf[i] = ~data->status_buf[i]; |
464 | |
465 | /* |
466 | * Ignore masked IRQs and ack if we need to; we ack early so |
467 | * there is no race between handling and acknowledging the |
468 | * interrupt. We assume that typically few of the interrupts |
469 | * will fire simultaneously so don't worry about overhead from |
470 | * doing a write per register. |
471 | */ |
472 | for (i = 0; i < data->chip->num_regs; i++) { |
473 | data->status_buf[i] &= ~data->mask_buf[i]; |
474 | |
475 | if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) { |
476 | reg = data->get_irq_reg(data, data->chip->ack_base, i); |
477 | |
478 | if (chip->ack_invert) |
479 | ret = regmap_write(map, reg, |
480 | val: ~data->status_buf[i]); |
481 | else |
482 | ret = regmap_write(map, reg, |
483 | val: data->status_buf[i]); |
484 | if (chip->clear_ack) { |
485 | if (chip->ack_invert && !ret) |
486 | ret = regmap_write(map, reg, UINT_MAX); |
487 | else if (!ret) |
488 | ret = regmap_write(map, reg, val: 0); |
489 | } |
490 | if (ret != 0) |
491 | dev_err(map->dev, "Failed to ack 0x%x: %d\n" , |
492 | reg, ret); |
493 | } |
494 | } |
495 | |
496 | for (i = 0; i < chip->num_irqs; i++) { |
497 | if (data->status_buf[chip->irqs[i].reg_offset / |
498 | map->reg_stride] & chip->irqs[i].mask) { |
499 | handle_nested_irq(irq: irq_find_mapping(domain: data->domain, hwirq: i)); |
500 | handled = true; |
501 | } |
502 | } |
503 | |
504 | exit: |
505 | if (chip->handle_post_irq) |
506 | chip->handle_post_irq(chip->irq_drv_data); |
507 | |
508 | if (chip->runtime_pm) |
509 | pm_runtime_put(dev: map->dev); |
510 | |
511 | if (handled) |
512 | return IRQ_HANDLED; |
513 | else |
514 | return IRQ_NONE; |
515 | } |
516 | |
517 | static int regmap_irq_map(struct irq_domain *h, unsigned int virq, |
518 | irq_hw_number_t hw) |
519 | { |
520 | struct regmap_irq_chip_data *data = h->host_data; |
521 | |
522 | irq_set_chip_data(irq: virq, data); |
523 | irq_set_chip(irq: virq, chip: &data->irq_chip); |
524 | irq_set_nested_thread(irq: virq, nest: 1); |
525 | irq_set_parent(irq: virq, parent_irq: data->irq); |
526 | irq_set_noprobe(irq: virq); |
527 | |
528 | return 0; |
529 | } |
530 | |
/* Domain ops: standard one/two-cell translation, custom map callback. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};
535 | |
536 | /** |
537 | * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback. |
538 | * @data: Data for the &struct regmap_irq_chip |
539 | * @base: Base register |
540 | * @index: Register index |
541 | * |
542 | * Returns the register address corresponding to the given @base and @index |
543 | * by the formula ``base + index * regmap_stride * irq_reg_stride``. |
544 | */ |
545 | unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data, |
546 | unsigned int base, int index) |
547 | { |
548 | struct regmap *map = data->map; |
549 | |
550 | return base + index * map->reg_stride * data->irq_reg_stride; |
551 | } |
552 | EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear); |
553 | |
554 | /** |
555 | * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback. |
556 | * @buf: Buffer containing configuration register values, this is a 2D array of |
557 | * `num_config_bases` rows, each of `num_config_regs` elements. |
558 | * @type: The requested IRQ type. |
559 | * @irq_data: The IRQ being configured. |
560 | * @idx: Index of the irq's config registers within each array `buf[i]` |
561 | * @irq_drv_data: Driver specific IRQ data |
562 | * |
563 | * This is a &struct regmap_irq_chip->set_type_config callback suitable for |
564 | * chips with one config register. Register values are updated according to |
565 | * the &struct regmap_irq_type data associated with an IRQ. |
566 | */ |
567 | int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type, |
568 | const struct regmap_irq *irq_data, |
569 | int idx, void *irq_drv_data) |
570 | { |
571 | const struct regmap_irq_type *t = &irq_data->type; |
572 | |
573 | if (t->type_reg_mask) |
574 | buf[0][idx] &= ~t->type_reg_mask; |
575 | else |
576 | buf[0][idx] &= ~(t->type_falling_val | |
577 | t->type_rising_val | |
578 | t->type_level_low_val | |
579 | t->type_level_high_val); |
580 | |
581 | switch (type) { |
582 | case IRQ_TYPE_EDGE_FALLING: |
583 | buf[0][idx] |= t->type_falling_val; |
584 | break; |
585 | |
586 | case IRQ_TYPE_EDGE_RISING: |
587 | buf[0][idx] |= t->type_rising_val; |
588 | break; |
589 | |
590 | case IRQ_TYPE_EDGE_BOTH: |
591 | buf[0][idx] |= (t->type_falling_val | |
592 | t->type_rising_val); |
593 | break; |
594 | |
595 | case IRQ_TYPE_LEVEL_HIGH: |
596 | buf[0][idx] |= t->type_level_high_val; |
597 | break; |
598 | |
599 | case IRQ_TYPE_LEVEL_LOW: |
600 | buf[0][idx] |= t->type_level_low_val; |
601 | break; |
602 | |
603 | default: |
604 | return -EINVAL; |
605 | } |
606 | |
607 | return 0; |
608 | } |
609 | EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple); |
610 | |
611 | /** |
612 | * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling |
613 | * |
614 | * @fwnode: The firmware node where the IRQ domain should be added to. |
615 | * @map: The regmap for the device. |
616 | * @irq: The IRQ the device uses to signal interrupts. |
617 | * @irq_flags: The IRQF_ flags to use for the primary interrupt. |
618 | * @irq_base: Allocate at specific IRQ number if irq_base > 0. |
619 | * @chip: Configuration for the interrupt controller. |
620 | * @data: Runtime data structure for the controller, allocated on success. |
621 | * |
622 | * Returns 0 on success or an errno on failure. |
623 | * |
624 | * In order for this to be efficient the chip really should use a |
625 | * register cache. The chip driver is responsible for restoring the |
626 | * register values used by the IRQ controller over suspend and resume. |
627 | */ |
628 | int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, |
629 | struct regmap *map, int irq, |
630 | int irq_flags, int irq_base, |
631 | const struct regmap_irq_chip *chip, |
632 | struct regmap_irq_chip_data **data) |
633 | { |
634 | struct regmap_irq_chip_data *d; |
635 | int i; |
636 | int ret = -ENOMEM; |
637 | u32 reg; |
638 | |
639 | if (chip->num_regs <= 0) |
640 | return -EINVAL; |
641 | |
642 | if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack)) |
643 | return -EINVAL; |
644 | |
645 | if (chip->mask_base && chip->unmask_base && !chip->mask_unmask_non_inverted) |
646 | return -EINVAL; |
647 | |
648 | for (i = 0; i < chip->num_irqs; i++) { |
649 | if (chip->irqs[i].reg_offset % map->reg_stride) |
650 | return -EINVAL; |
651 | if (chip->irqs[i].reg_offset / map->reg_stride >= |
652 | chip->num_regs) |
653 | return -EINVAL; |
654 | } |
655 | |
656 | if (irq_base) { |
657 | irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0); |
658 | if (irq_base < 0) { |
659 | dev_warn(map->dev, "Failed to allocate IRQs: %d\n" , |
660 | irq_base); |
661 | return irq_base; |
662 | } |
663 | } |
664 | |
665 | d = kzalloc(size: sizeof(*d), GFP_KERNEL); |
666 | if (!d) |
667 | return -ENOMEM; |
668 | |
669 | if (chip->num_main_regs) { |
670 | d->main_status_buf = kcalloc(n: chip->num_main_regs, |
671 | size: sizeof(*d->main_status_buf), |
672 | GFP_KERNEL); |
673 | |
674 | if (!d->main_status_buf) |
675 | goto err_alloc; |
676 | } |
677 | |
678 | d->status_buf = kcalloc(n: chip->num_regs, size: sizeof(*d->status_buf), |
679 | GFP_KERNEL); |
680 | if (!d->status_buf) |
681 | goto err_alloc; |
682 | |
683 | d->mask_buf = kcalloc(n: chip->num_regs, size: sizeof(*d->mask_buf), |
684 | GFP_KERNEL); |
685 | if (!d->mask_buf) |
686 | goto err_alloc; |
687 | |
688 | d->mask_buf_def = kcalloc(n: chip->num_regs, size: sizeof(*d->mask_buf_def), |
689 | GFP_KERNEL); |
690 | if (!d->mask_buf_def) |
691 | goto err_alloc; |
692 | |
693 | if (chip->wake_base) { |
694 | d->wake_buf = kcalloc(n: chip->num_regs, size: sizeof(*d->wake_buf), |
695 | GFP_KERNEL); |
696 | if (!d->wake_buf) |
697 | goto err_alloc; |
698 | } |
699 | |
700 | if (chip->type_in_mask) { |
701 | d->type_buf_def = kcalloc(n: chip->num_regs, |
702 | size: sizeof(*d->type_buf_def), GFP_KERNEL); |
703 | if (!d->type_buf_def) |
704 | goto err_alloc; |
705 | |
706 | d->type_buf = kcalloc(n: chip->num_regs, size: sizeof(*d->type_buf), GFP_KERNEL); |
707 | if (!d->type_buf) |
708 | goto err_alloc; |
709 | } |
710 | |
711 | if (chip->num_config_bases && chip->num_config_regs) { |
712 | /* |
713 | * Create config_buf[num_config_bases][num_config_regs] |
714 | */ |
715 | d->config_buf = kcalloc(n: chip->num_config_bases, |
716 | size: sizeof(*d->config_buf), GFP_KERNEL); |
717 | if (!d->config_buf) |
718 | goto err_alloc; |
719 | |
720 | for (i = 0; i < chip->num_config_bases; i++) { |
721 | d->config_buf[i] = kcalloc(n: chip->num_config_regs, |
722 | size: sizeof(**d->config_buf), |
723 | GFP_KERNEL); |
724 | if (!d->config_buf[i]) |
725 | goto err_alloc; |
726 | } |
727 | } |
728 | |
729 | d->irq_chip = regmap_irq_chip; |
730 | d->irq_chip.name = chip->name; |
731 | d->irq = irq; |
732 | d->map = map; |
733 | d->chip = chip; |
734 | d->irq_base = irq_base; |
735 | |
736 | if (chip->irq_reg_stride) |
737 | d->irq_reg_stride = chip->irq_reg_stride; |
738 | else |
739 | d->irq_reg_stride = 1; |
740 | |
741 | if (chip->get_irq_reg) |
742 | d->get_irq_reg = chip->get_irq_reg; |
743 | else |
744 | d->get_irq_reg = regmap_irq_get_irq_reg_linear; |
745 | |
746 | if (regmap_irq_can_bulk_read_status(data: d)) { |
747 | d->status_reg_buf = kmalloc_array(n: chip->num_regs, |
748 | size: map->format.val_bytes, |
749 | GFP_KERNEL); |
750 | if (!d->status_reg_buf) |
751 | goto err_alloc; |
752 | } |
753 | |
754 | mutex_init(&d->lock); |
755 | |
756 | for (i = 0; i < chip->num_irqs; i++) |
757 | d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride] |
758 | |= chip->irqs[i].mask; |
759 | |
760 | /* Mask all the interrupts by default */ |
761 | for (i = 0; i < chip->num_regs; i++) { |
762 | d->mask_buf[i] = d->mask_buf_def[i]; |
763 | |
764 | if (chip->handle_mask_sync) { |
765 | ret = chip->handle_mask_sync(i, d->mask_buf_def[i], |
766 | d->mask_buf[i], |
767 | chip->irq_drv_data); |
768 | if (ret) |
769 | goto err_alloc; |
770 | } |
771 | |
772 | if (chip->mask_base && !chip->handle_mask_sync) { |
773 | reg = d->get_irq_reg(d, chip->mask_base, i); |
774 | ret = regmap_update_bits(map: d->map, reg, |
775 | mask: d->mask_buf_def[i], |
776 | val: d->mask_buf[i]); |
777 | if (ret) { |
778 | dev_err(map->dev, "Failed to set masks in 0x%x: %d\n" , |
779 | reg, ret); |
780 | goto err_alloc; |
781 | } |
782 | } |
783 | |
784 | if (chip->unmask_base && !chip->handle_mask_sync) { |
785 | reg = d->get_irq_reg(d, chip->unmask_base, i); |
786 | ret = regmap_update_bits(map: d->map, reg, |
787 | mask: d->mask_buf_def[i], val: ~d->mask_buf[i]); |
788 | if (ret) { |
789 | dev_err(map->dev, "Failed to set masks in 0x%x: %d\n" , |
790 | reg, ret); |
791 | goto err_alloc; |
792 | } |
793 | } |
794 | |
795 | if (!chip->init_ack_masked) |
796 | continue; |
797 | |
798 | /* Ack masked but set interrupts */ |
799 | if (d->chip->no_status) { |
800 | /* no status register so default to all active */ |
801 | d->status_buf[i] = GENMASK(31, 0); |
802 | } else { |
803 | reg = d->get_irq_reg(d, d->chip->status_base, i); |
804 | ret = regmap_read(map, reg, val: &d->status_buf[i]); |
805 | if (ret != 0) { |
806 | dev_err(map->dev, "Failed to read IRQ status: %d\n" , |
807 | ret); |
808 | goto err_alloc; |
809 | } |
810 | } |
811 | |
812 | if (chip->status_invert) |
813 | d->status_buf[i] = ~d->status_buf[i]; |
814 | |
815 | if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) { |
816 | reg = d->get_irq_reg(d, d->chip->ack_base, i); |
817 | if (chip->ack_invert) |
818 | ret = regmap_write(map, reg, |
819 | val: ~(d->status_buf[i] & d->mask_buf[i])); |
820 | else |
821 | ret = regmap_write(map, reg, |
822 | val: d->status_buf[i] & d->mask_buf[i]); |
823 | if (chip->clear_ack) { |
824 | if (chip->ack_invert && !ret) |
825 | ret = regmap_write(map, reg, UINT_MAX); |
826 | else if (!ret) |
827 | ret = regmap_write(map, reg, val: 0); |
828 | } |
829 | if (ret != 0) { |
830 | dev_err(map->dev, "Failed to ack 0x%x: %d\n" , |
831 | reg, ret); |
832 | goto err_alloc; |
833 | } |
834 | } |
835 | } |
836 | |
837 | /* Wake is disabled by default */ |
838 | if (d->wake_buf) { |
839 | for (i = 0; i < chip->num_regs; i++) { |
840 | d->wake_buf[i] = d->mask_buf_def[i]; |
841 | reg = d->get_irq_reg(d, d->chip->wake_base, i); |
842 | |
843 | if (chip->wake_invert) |
844 | ret = regmap_update_bits(map: d->map, reg, |
845 | mask: d->mask_buf_def[i], |
846 | val: 0); |
847 | else |
848 | ret = regmap_update_bits(map: d->map, reg, |
849 | mask: d->mask_buf_def[i], |
850 | val: d->wake_buf[i]); |
851 | if (ret != 0) { |
852 | dev_err(map->dev, "Failed to set masks in 0x%x: %d\n" , |
853 | reg, ret); |
854 | goto err_alloc; |
855 | } |
856 | } |
857 | } |
858 | |
859 | if (irq_base) |
860 | d->domain = irq_domain_create_legacy(fwnode, size: chip->num_irqs, |
861 | first_irq: irq_base, first_hwirq: 0, |
862 | ops: ®map_domain_ops, host_data: d); |
863 | else |
864 | d->domain = irq_domain_create_linear(fwnode, size: chip->num_irqs, |
865 | ops: ®map_domain_ops, host_data: d); |
866 | if (!d->domain) { |
867 | dev_err(map->dev, "Failed to create IRQ domain\n" ); |
868 | ret = -ENOMEM; |
869 | goto err_alloc; |
870 | } |
871 | |
872 | ret = request_threaded_irq(irq, NULL, thread_fn: regmap_irq_thread, |
873 | flags: irq_flags | IRQF_ONESHOT, |
874 | name: chip->name, dev: d); |
875 | if (ret != 0) { |
876 | dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n" , |
877 | irq, chip->name, ret); |
878 | goto err_domain; |
879 | } |
880 | |
881 | *data = d; |
882 | |
883 | return 0; |
884 | |
885 | err_domain: |
886 | /* Should really dispose of the domain but... */ |
887 | err_alloc: |
888 | kfree(objp: d->type_buf); |
889 | kfree(objp: d->type_buf_def); |
890 | kfree(objp: d->wake_buf); |
891 | kfree(objp: d->mask_buf_def); |
892 | kfree(objp: d->mask_buf); |
893 | kfree(objp: d->status_buf); |
894 | kfree(objp: d->status_reg_buf); |
895 | if (d->config_buf) { |
896 | for (i = 0; i < chip->num_config_bases; i++) |
897 | kfree(objp: d->config_buf[i]); |
898 | kfree(objp: d->config_buf); |
899 | } |
900 | kfree(objp: d); |
901 | return ret; |
902 | } |
903 | EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode); |
904 | |
905 | /** |
906 | * regmap_add_irq_chip() - Use standard regmap IRQ controller handling |
907 | * |
908 | * @map: The regmap for the device. |
909 | * @irq: The IRQ the device uses to signal interrupts. |
910 | * @irq_flags: The IRQF_ flags to use for the primary interrupt. |
911 | * @irq_base: Allocate at specific IRQ number if irq_base > 0. |
912 | * @chip: Configuration for the interrupt controller. |
913 | * @data: Runtime data structure for the controller, allocated on success. |
914 | * |
915 | * Returns 0 on success or an errno on failure. |
916 | * |
917 | * This is the same as regmap_add_irq_chip_fwnode, except that the firmware |
918 | * node of the regmap is used. |
919 | */ |
920 | int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, |
921 | int irq_base, const struct regmap_irq_chip *chip, |
922 | struct regmap_irq_chip_data **data) |
923 | { |
924 | return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq, |
925 | irq_flags, irq_base, chip, data); |
926 | } |
927 | EXPORT_SYMBOL_GPL(regmap_add_irq_chip); |
928 | |
929 | /** |
930 | * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip |
931 | * |
932 | * @irq: Primary IRQ for the device |
933 | * @d: ®map_irq_chip_data allocated by regmap_add_irq_chip() |
934 | * |
935 | * This function also disposes of all mapped IRQs on the chip. |
936 | */ |
937 | void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) |
938 | { |
939 | unsigned int virq; |
940 | int i, hwirq; |
941 | |
942 | if (!d) |
943 | return; |
944 | |
945 | free_irq(irq, d); |
946 | |
947 | /* Dispose all virtual irq from irq domain before removing it */ |
948 | for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) { |
949 | /* Ignore hwirq if holes in the IRQ list */ |
950 | if (!d->chip->irqs[hwirq].mask) |
951 | continue; |
952 | |
953 | /* |
954 | * Find the virtual irq of hwirq on chip and if it is |
955 | * there then dispose it |
956 | */ |
957 | virq = irq_find_mapping(domain: d->domain, hwirq); |
958 | if (virq) |
959 | irq_dispose_mapping(virq); |
960 | } |
961 | |
962 | irq_domain_remove(host: d->domain); |
963 | kfree(objp: d->type_buf); |
964 | kfree(objp: d->type_buf_def); |
965 | kfree(objp: d->wake_buf); |
966 | kfree(objp: d->mask_buf_def); |
967 | kfree(objp: d->mask_buf); |
968 | kfree(objp: d->status_reg_buf); |
969 | kfree(objp: d->status_buf); |
970 | if (d->config_buf) { |
971 | for (i = 0; i < d->chip->num_config_bases; i++) |
972 | kfree(objp: d->config_buf[i]); |
973 | kfree(objp: d->config_buf); |
974 | } |
975 | kfree(objp: d); |
976 | } |
977 | EXPORT_SYMBOL_GPL(regmap_del_irq_chip); |
978 | |
979 | static void devm_regmap_irq_chip_release(struct device *dev, void *res) |
980 | { |
981 | struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res; |
982 | |
983 | regmap_del_irq_chip(d->irq, d); |
984 | } |
985 | |
/*
 * devres match callback: true when the stored chip data pointer equals
 * the one the caller wants to release.  WARN_ON() returns its condition,
 * so a NULL resource is both reported and rejected in one step.
 */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
997 | |
998 | /** |
999 | * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode() |
1000 | * |
1001 | * @dev: The device pointer on which irq_chip belongs to. |
1002 | * @fwnode: The firmware node where the IRQ domain should be added to. |
1003 | * @map: The regmap for the device. |
1004 | * @irq: The IRQ the device uses to signal interrupts |
1005 | * @irq_flags: The IRQF_ flags to use for the primary interrupt. |
1006 | * @irq_base: Allocate at specific IRQ number if irq_base > 0. |
1007 | * @chip: Configuration for the interrupt controller. |
1008 | * @data: Runtime data structure for the controller, allocated on success |
1009 | * |
1010 | * Returns 0 on success or an errno on failure. |
1011 | * |
 * The &regmap_irq_chip_data will be automatically released when the device is
1013 | * unbound. |
1014 | */ |
1015 | int devm_regmap_add_irq_chip_fwnode(struct device *dev, |
1016 | struct fwnode_handle *fwnode, |
1017 | struct regmap *map, int irq, |
1018 | int irq_flags, int irq_base, |
1019 | const struct regmap_irq_chip *chip, |
1020 | struct regmap_irq_chip_data **data) |
1021 | { |
1022 | struct regmap_irq_chip_data **ptr, *d; |
1023 | int ret; |
1024 | |
1025 | ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr), |
1026 | GFP_KERNEL); |
1027 | if (!ptr) |
1028 | return -ENOMEM; |
1029 | |
1030 | ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base, |
1031 | chip, &d); |
1032 | if (ret < 0) { |
1033 | devres_free(res: ptr); |
1034 | return ret; |
1035 | } |
1036 | |
1037 | *ptr = d; |
1038 | devres_add(dev, res: ptr); |
1039 | *data = d; |
1040 | return 0; |
1041 | } |
1042 | EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode); |
1043 | |
1044 | /** |
1045 | * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip() |
1046 | * |
1047 | * @dev: The device pointer on which irq_chip belongs to. |
1048 | * @map: The regmap for the device. |
1049 | * @irq: The IRQ the device uses to signal interrupts |
1050 | * @irq_flags: The IRQF_ flags to use for the primary interrupt. |
1051 | * @irq_base: Allocate at specific IRQ number if irq_base > 0. |
1052 | * @chip: Configuration for the interrupt controller. |
1053 | * @data: Runtime data structure for the controller, allocated on success |
1054 | * |
1055 | * Returns 0 on success or an errno on failure. |
1056 | * |
 * The &regmap_irq_chip_data will be automatically released when the device is
1058 | * unbound. |
1059 | */ |
1060 | int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, |
1061 | int irq_flags, int irq_base, |
1062 | const struct regmap_irq_chip *chip, |
1063 | struct regmap_irq_chip_data **data) |
1064 | { |
1065 | return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map, |
1066 | irq, irq_flags, irq_base, chip, |
1067 | data); |
1068 | } |
1069 | EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip); |
1070 | |
1071 | /** |
1072 | * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip() |
1073 | * |
1074 | * @dev: Device for which the resource was allocated. |
1075 | * @irq: Primary IRQ for the device. |
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
1077 | * |
1078 | * A resource managed version of regmap_del_irq_chip(). |
1079 | */ |
1080 | void devm_regmap_del_irq_chip(struct device *dev, int irq, |
1081 | struct regmap_irq_chip_data *data) |
1082 | { |
1083 | int rc; |
1084 | |
1085 | WARN_ON(irq != data->irq); |
1086 | rc = devres_release(dev, release: devm_regmap_irq_chip_release, |
1087 | match: devm_regmap_irq_chip_match, match_data: data); |
1088 | |
1089 | if (rc != 0) |
1090 | WARN_ON(rc); |
1091 | } |
1092 | EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip); |
1093 | |
1094 | /** |
1095 | * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip |
1096 | * |
1097 | * @data: regmap irq controller to operate on. |
1098 | * |
1099 | * Useful for drivers to request their own IRQs. |
1100 | */ |
1101 | int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data) |
1102 | { |
1103 | WARN_ON(!data->irq_base); |
1104 | return data->irq_base; |
1105 | } |
1106 | EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base); |
1107 | |
1108 | /** |
1109 | * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ |
1110 | * |
1111 | * @data: regmap irq controller to operate on. |
1112 | * @irq: index of the interrupt requested in the chip IRQs. |
1113 | * |
1114 | * Useful for drivers to request their own IRQs. |
1115 | */ |
1116 | int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq) |
1117 | { |
1118 | /* Handle holes in the IRQ list */ |
1119 | if (!data->chip->irqs[irq].mask) |
1120 | return -EINVAL; |
1121 | |
1122 | return irq_create_mapping(host: data->domain, hwirq: irq); |
1123 | } |
1124 | EXPORT_SYMBOL_GPL(regmap_irq_get_virq); |
1125 | |
1126 | /** |
1127 | * regmap_irq_get_domain() - Retrieve the irq_domain for the chip |
1128 | * |
1129 | * @data: regmap_irq controller to operate on. |
1130 | * |
1131 | * Useful for drivers to request their own IRQs and for integration |
1132 | * with subsystems. For ease of integration NULL is accepted as a |
1133 | * domain, allowing devices to just call this even if no domain is |
1134 | * allocated. |
1135 | */ |
1136 | struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data) |
1137 | { |
1138 | if (data) |
1139 | return data->domain; |
1140 | else |
1141 | return NULL; |
1142 | } |
1143 | EXPORT_SYMBOL_GPL(regmap_irq_get_domain); |
1144 | |