1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // Copyright (c) 2014-2018 MediaTek Inc. |
3 | |
4 | /* |
5 | * Library for MediaTek External Interrupt Support |
6 | * |
7 | * Author: Maoguang Meng <maoguang.meng@mediatek.com> |
8 | * Sean Wang <sean.wang@mediatek.com> |
9 | * |
10 | */ |
11 | |
12 | #include <linux/delay.h> |
13 | #include <linux/err.h> |
14 | #include <linux/gpio/driver.h> |
15 | #include <linux/io.h> |
16 | #include <linux/irqchip/chained_irq.h> |
17 | #include <linux/irqdomain.h> |
18 | #include <linux/module.h> |
19 | #include <linux/of_irq.h> |
20 | #include <linux/platform_device.h> |
21 | |
22 | #include "mtk-eint.h" |
23 | |
24 | #define MTK_EINT_EDGE_SENSITIVE 0 |
25 | #define MTK_EINT_LEVEL_SENSITIVE 1 |
26 | #define MTK_EINT_DBNC_SET_DBNC_BITS 4 |
27 | #define MTK_EINT_DBNC_MAX 16 |
28 | #define MTK_EINT_DBNC_RST_BIT (0x1 << 1) |
29 | #define MTK_EINT_DBNC_SET_EN (0x1 << 0) |
30 | |
/*
 * Default register layout of the MediaTek EINT controller, used whenever
 * a client does not supply its own mtk_eint_regs (see mtk_eint_do_init()).
 * All values are byte offsets from the controller's MMIO base.
 */
static const struct mtk_eint_regs mtk_generic_eint_regs = {
	.stat      = 0x000,	/* raw interrupt status */
	.ack       = 0x040,	/* write-1-to-ack */
	.mask      = 0x080,	/* mask state (1 = masked) */
	.mask_set  = 0x0c0,
	.mask_clr  = 0x100,
	.sens      = 0x140,	/* sensitivity: level vs. edge */
	.sens_set  = 0x180,
	.sens_clr  = 0x1c0,
	.soft      = 0x200,	/* software-triggered interrupt */
	.soft_set  = 0x240,
	.soft_clr  = 0x280,
	.pol       = 0x300,	/* polarity: high/rising vs. low/falling */
	.pol_set   = 0x340,
	.pol_clr   = 0x380,
	.dom_en    = 0x400,	/* per-domain enable */
	.dbnc_ctrl = 0x500,	/* hardware debounce control */
	.dbnc_set  = 0x600,
	.dbnc_clr  = 0x700,
};
51 | |
/*
 * Hardware debounce periods in microseconds, ascending, zero-terminated.
 * Index into this table is the value programmed into the DBNC field.
 */
const unsigned int debounce_time_mt2701[] = {
	500, 1000, 16000, 32000, 64000, 128000, 256000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt2701);
56 | |
/* MT6765 debounce periods in microseconds, ascending, zero-terminated. */
const unsigned int debounce_time_mt6765[] = {
	125, 250, 500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt6765);
61 | |
/* MT6795 debounce periods in microseconds, ascending, zero-terminated. */
const unsigned int debounce_time_mt6795[] = {
	500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt6795);
66 | |
67 | static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint, |
68 | unsigned int eint_num, |
69 | unsigned int offset) |
70 | { |
71 | unsigned int eint_base = 0; |
72 | void __iomem *reg; |
73 | |
74 | if (eint_num >= eint->hw->ap_num) |
75 | eint_base = eint->hw->ap_num; |
76 | |
77 | reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4; |
78 | |
79 | return reg; |
80 | } |
81 | |
82 | static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint, |
83 | unsigned int eint_num) |
84 | { |
85 | unsigned int sens; |
86 | unsigned int bit = BIT(eint_num % 32); |
87 | void __iomem *reg = mtk_eint_get_offset(eint, eint_num, |
88 | offset: eint->regs->sens); |
89 | |
90 | if (readl(addr: reg) & bit) |
91 | sens = MTK_EINT_LEVEL_SENSITIVE; |
92 | else |
93 | sens = MTK_EINT_EDGE_SENSITIVE; |
94 | |
95 | if (eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE) |
96 | return 1; |
97 | else |
98 | return 0; |
99 | } |
100 | |
101 | static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq) |
102 | { |
103 | int start_level, curr_level; |
104 | unsigned int reg_offset; |
105 | u32 mask = BIT(hwirq & 0x1f); |
106 | u32 port = (hwirq >> 5) & eint->hw->port_mask; |
107 | void __iomem *reg = eint->base + (port << 2); |
108 | |
109 | curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq); |
110 | |
111 | do { |
112 | start_level = curr_level; |
113 | if (start_level) |
114 | reg_offset = eint->regs->pol_clr; |
115 | else |
116 | reg_offset = eint->regs->pol_set; |
117 | writel(val: mask, addr: reg + reg_offset); |
118 | |
119 | curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, |
120 | hwirq); |
121 | } while (start_level != curr_level); |
122 | |
123 | return start_level; |
124 | } |
125 | |
126 | static void mtk_eint_mask(struct irq_data *d) |
127 | { |
128 | struct mtk_eint *eint = irq_data_get_irq_chip_data(d); |
129 | u32 mask = BIT(d->hwirq & 0x1f); |
130 | void __iomem *reg = mtk_eint_get_offset(eint, eint_num: d->hwirq, |
131 | offset: eint->regs->mask_set); |
132 | |
133 | eint->cur_mask[d->hwirq >> 5] &= ~mask; |
134 | |
135 | writel(val: mask, addr: reg); |
136 | } |
137 | |
138 | static void mtk_eint_unmask(struct irq_data *d) |
139 | { |
140 | struct mtk_eint *eint = irq_data_get_irq_chip_data(d); |
141 | u32 mask = BIT(d->hwirq & 0x1f); |
142 | void __iomem *reg = mtk_eint_get_offset(eint, eint_num: d->hwirq, |
143 | offset: eint->regs->mask_clr); |
144 | |
145 | eint->cur_mask[d->hwirq >> 5] |= mask; |
146 | |
147 | writel(val: mask, addr: reg); |
148 | |
149 | if (eint->dual_edge[d->hwirq]) |
150 | mtk_eint_flip_edge(eint, hwirq: d->hwirq); |
151 | } |
152 | |
153 | static unsigned int mtk_eint_get_mask(struct mtk_eint *eint, |
154 | unsigned int eint_num) |
155 | { |
156 | unsigned int bit = BIT(eint_num % 32); |
157 | void __iomem *reg = mtk_eint_get_offset(eint, eint_num, |
158 | offset: eint->regs->mask); |
159 | |
160 | return !!(readl(addr: reg) & bit); |
161 | } |
162 | |
163 | static void mtk_eint_ack(struct irq_data *d) |
164 | { |
165 | struct mtk_eint *eint = irq_data_get_irq_chip_data(d); |
166 | u32 mask = BIT(d->hwirq & 0x1f); |
167 | void __iomem *reg = mtk_eint_get_offset(eint, eint_num: d->hwirq, |
168 | offset: eint->regs->ack); |
169 | |
170 | writel(val: mask, addr: reg); |
171 | } |
172 | |
173 | static int mtk_eint_set_type(struct irq_data *d, unsigned int type) |
174 | { |
175 | struct mtk_eint *eint = irq_data_get_irq_chip_data(d); |
176 | bool masked; |
177 | u32 mask = BIT(d->hwirq & 0x1f); |
178 | void __iomem *reg; |
179 | |
180 | if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) || |
181 | ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) { |
182 | dev_err(eint->dev, |
183 | "Can't configure IRQ%d (EINT%lu) for type 0x%X\n" , |
184 | d->irq, d->hwirq, type); |
185 | return -EINVAL; |
186 | } |
187 | |
188 | if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) |
189 | eint->dual_edge[d->hwirq] = 1; |
190 | else |
191 | eint->dual_edge[d->hwirq] = 0; |
192 | |
193 | if (!mtk_eint_get_mask(eint, eint_num: d->hwirq)) { |
194 | mtk_eint_mask(d); |
195 | masked = false; |
196 | } else { |
197 | masked = true; |
198 | } |
199 | |
200 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) { |
201 | reg = mtk_eint_get_offset(eint, eint_num: d->hwirq, offset: eint->regs->pol_clr); |
202 | writel(val: mask, addr: reg); |
203 | } else { |
204 | reg = mtk_eint_get_offset(eint, eint_num: d->hwirq, offset: eint->regs->pol_set); |
205 | writel(val: mask, addr: reg); |
206 | } |
207 | |
208 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { |
209 | reg = mtk_eint_get_offset(eint, eint_num: d->hwirq, offset: eint->regs->sens_clr); |
210 | writel(val: mask, addr: reg); |
211 | } else { |
212 | reg = mtk_eint_get_offset(eint, eint_num: d->hwirq, offset: eint->regs->sens_set); |
213 | writel(val: mask, addr: reg); |
214 | } |
215 | |
216 | mtk_eint_ack(d); |
217 | if (!masked) |
218 | mtk_eint_unmask(d); |
219 | |
220 | return 0; |
221 | } |
222 | |
223 | static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on) |
224 | { |
225 | struct mtk_eint *eint = irq_data_get_irq_chip_data(d); |
226 | int shift = d->hwirq & 0x1f; |
227 | int reg = d->hwirq >> 5; |
228 | |
229 | if (on) |
230 | eint->wake_mask[reg] |= BIT(shift); |
231 | else |
232 | eint->wake_mask[reg] &= ~BIT(shift); |
233 | |
234 | return 0; |
235 | } |
236 | |
237 | static void mtk_eint_chip_write_mask(const struct mtk_eint *eint, |
238 | void __iomem *base, u32 *buf) |
239 | { |
240 | int port; |
241 | void __iomem *reg; |
242 | |
243 | for (port = 0; port < eint->hw->ports; port++) { |
244 | reg = base + (port << 2); |
245 | writel_relaxed(~buf[port], reg + eint->regs->mask_set); |
246 | writel_relaxed(buf[port], reg + eint->regs->mask_clr); |
247 | } |
248 | } |
249 | |
250 | static int mtk_eint_irq_request_resources(struct irq_data *d) |
251 | { |
252 | struct mtk_eint *eint = irq_data_get_irq_chip_data(d); |
253 | struct gpio_chip *gpio_c; |
254 | unsigned int gpio_n; |
255 | int err; |
256 | |
257 | err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, |
258 | &gpio_n, &gpio_c); |
259 | if (err < 0) { |
260 | dev_err(eint->dev, "Can not find pin\n" ); |
261 | return err; |
262 | } |
263 | |
264 | err = gpiochip_lock_as_irq(gc: gpio_c, offset: gpio_n); |
265 | if (err < 0) { |
266 | dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n" , |
267 | irqd_to_hwirq(d)); |
268 | return err; |
269 | } |
270 | |
271 | err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq); |
272 | if (err < 0) { |
273 | dev_err(eint->dev, "Can not eint mode\n" ); |
274 | return err; |
275 | } |
276 | |
277 | return 0; |
278 | } |
279 | |
280 | static void mtk_eint_irq_release_resources(struct irq_data *d) |
281 | { |
282 | struct mtk_eint *eint = irq_data_get_irq_chip_data(d); |
283 | struct gpio_chip *gpio_c; |
284 | unsigned int gpio_n; |
285 | |
286 | eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n, |
287 | &gpio_c); |
288 | |
289 | gpiochip_unlock_as_irq(gc: gpio_c, offset: gpio_n); |
290 | } |
291 | |
/* irq_chip operations shared by every EINT line in the domain. */
static struct irq_chip mtk_eint_irq_chip = {
	.name = "mt-eint",
	.irq_disable = mtk_eint_mask,
	.irq_mask = mtk_eint_mask,
	.irq_unmask = mtk_eint_unmask,
	.irq_ack = mtk_eint_ack,
	.irq_set_type = mtk_eint_set_type,
	.irq_set_wake = mtk_eint_irq_set_wake,
	.irq_request_resources = mtk_eint_irq_request_resources,
	.irq_release_resources = mtk_eint_irq_release_resources,
};
303 | |
304 | static unsigned int mtk_eint_hw_init(struct mtk_eint *eint) |
305 | { |
306 | void __iomem *dom_en = eint->base + eint->regs->dom_en; |
307 | void __iomem *mask_set = eint->base + eint->regs->mask_set; |
308 | unsigned int i; |
309 | |
310 | for (i = 0; i < eint->hw->ap_num; i += 32) { |
311 | writel(val: 0xffffffff, addr: dom_en); |
312 | writel(val: 0xffffffff, addr: mask_set); |
313 | dom_en += 4; |
314 | mask_set += 4; |
315 | } |
316 | |
317 | return 0; |
318 | } |
319 | |
320 | static inline void |
321 | mtk_eint_debounce_process(struct mtk_eint *eint, int index) |
322 | { |
323 | unsigned int rst, ctrl_offset; |
324 | unsigned int bit, dbnc; |
325 | |
326 | ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl; |
327 | dbnc = readl(addr: eint->base + ctrl_offset); |
328 | bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8); |
329 | if ((bit & dbnc) > 0) { |
330 | ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set; |
331 | rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8); |
332 | writel(val: rst, addr: eint->base + ctrl_offset); |
333 | } |
334 | } |
335 | |
336 | static void mtk_eint_irq_handler(struct irq_desc *desc) |
337 | { |
338 | struct irq_chip *chip = irq_desc_get_chip(desc); |
339 | struct mtk_eint *eint = irq_desc_get_handler_data(desc); |
340 | unsigned int status, eint_num; |
341 | int offset, mask_offset, index; |
342 | void __iomem *reg = mtk_eint_get_offset(eint, eint_num: 0, offset: eint->regs->stat); |
343 | int dual_edge, start_level, curr_level; |
344 | |
345 | chained_irq_enter(chip, desc); |
346 | for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32, |
347 | reg += 4) { |
348 | status = readl(addr: reg); |
349 | while (status) { |
350 | offset = __ffs(status); |
351 | mask_offset = eint_num >> 5; |
352 | index = eint_num + offset; |
353 | status &= ~BIT(offset); |
354 | |
355 | /* |
356 | * If we get an interrupt on pin that was only required |
357 | * for wake (but no real interrupt requested), mask the |
358 | * interrupt (as would mtk_eint_resume do anyway later |
359 | * in the resume sequence). |
360 | */ |
361 | if (eint->wake_mask[mask_offset] & BIT(offset) && |
362 | !(eint->cur_mask[mask_offset] & BIT(offset))) { |
363 | writel_relaxed(BIT(offset), reg - |
364 | eint->regs->stat + |
365 | eint->regs->mask_set); |
366 | } |
367 | |
368 | dual_edge = eint->dual_edge[index]; |
369 | if (dual_edge) { |
370 | /* |
371 | * Clear soft-irq in case we raised it last |
372 | * time. |
373 | */ |
374 | writel(BIT(offset), addr: reg - eint->regs->stat + |
375 | eint->regs->soft_clr); |
376 | |
377 | start_level = |
378 | eint->gpio_xlate->get_gpio_state(eint->pctl, |
379 | index); |
380 | } |
381 | |
382 | generic_handle_domain_irq(domain: eint->domain, hwirq: index); |
383 | |
384 | if (dual_edge) { |
385 | curr_level = mtk_eint_flip_edge(eint, hwirq: index); |
386 | |
387 | /* |
388 | * If level changed, we might lost one edge |
389 | * interrupt, raised it through soft-irq. |
390 | */ |
391 | if (start_level != curr_level) |
392 | writel(BIT(offset), addr: reg - |
393 | eint->regs->stat + |
394 | eint->regs->soft_set); |
395 | } |
396 | |
397 | if (index < eint->hw->db_cnt) |
398 | mtk_eint_debounce_process(eint, index); |
399 | } |
400 | } |
401 | chained_irq_exit(chip, desc); |
402 | } |
403 | |
404 | int mtk_eint_do_suspend(struct mtk_eint *eint) |
405 | { |
406 | mtk_eint_chip_write_mask(eint, base: eint->base, buf: eint->wake_mask); |
407 | |
408 | return 0; |
409 | } |
410 | EXPORT_SYMBOL_GPL(mtk_eint_do_suspend); |
411 | |
412 | int mtk_eint_do_resume(struct mtk_eint *eint) |
413 | { |
414 | mtk_eint_chip_write_mask(eint, base: eint->base, buf: eint->cur_mask); |
415 | |
416 | return 0; |
417 | } |
418 | EXPORT_SYMBOL_GPL(mtk_eint_do_resume); |
419 | |
420 | int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num, |
421 | unsigned int debounce) |
422 | { |
423 | int virq, eint_offset; |
424 | unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, |
425 | dbnc; |
426 | struct irq_data *d; |
427 | |
428 | if (!eint->hw->db_time) |
429 | return -EOPNOTSUPP; |
430 | |
431 | virq = irq_find_mapping(domain: eint->domain, hwirq: eint_num); |
432 | eint_offset = (eint_num % 4) * 8; |
433 | d = irq_get_irq_data(irq: virq); |
434 | |
435 | set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set; |
436 | clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr; |
437 | |
438 | if (!mtk_eint_can_en_debounce(eint, eint_num)) |
439 | return -EINVAL; |
440 | |
441 | dbnc = eint->num_db_time; |
442 | for (i = 0; i < eint->num_db_time; i++) { |
443 | if (debounce <= eint->hw->db_time[i]) { |
444 | dbnc = i; |
445 | break; |
446 | } |
447 | } |
448 | |
449 | if (!mtk_eint_get_mask(eint, eint_num)) { |
450 | mtk_eint_mask(d); |
451 | unmask = 1; |
452 | } else { |
453 | unmask = 0; |
454 | } |
455 | |
456 | clr_bit = 0xff << eint_offset; |
457 | writel(val: clr_bit, addr: eint->base + clr_offset); |
458 | |
459 | bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) << |
460 | eint_offset; |
461 | rst = MTK_EINT_DBNC_RST_BIT << eint_offset; |
462 | writel(val: rst | bit, addr: eint->base + set_offset); |
463 | |
464 | /* |
465 | * Delay a while (more than 2T) to wait for hw debounce counter reset |
466 | * work correctly. |
467 | */ |
468 | udelay(1); |
469 | if (unmask == 1) |
470 | mtk_eint_unmask(d); |
471 | |
472 | return 0; |
473 | } |
474 | EXPORT_SYMBOL_GPL(mtk_eint_set_debounce); |
475 | |
476 | int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n) |
477 | { |
478 | int irq; |
479 | |
480 | irq = irq_find_mapping(domain: eint->domain, hwirq: eint_n); |
481 | if (!irq) |
482 | return -EINVAL; |
483 | |
484 | return irq; |
485 | } |
486 | EXPORT_SYMBOL_GPL(mtk_eint_find_irq); |
487 | |
488 | int mtk_eint_do_init(struct mtk_eint *eint) |
489 | { |
490 | int i; |
491 | |
492 | /* If clients don't assign a specific regs, let's use generic one */ |
493 | if (!eint->regs) |
494 | eint->regs = &mtk_generic_eint_regs; |
495 | |
496 | eint->wake_mask = devm_kcalloc(dev: eint->dev, n: eint->hw->ports, |
497 | size: sizeof(*eint->wake_mask), GFP_KERNEL); |
498 | if (!eint->wake_mask) |
499 | return -ENOMEM; |
500 | |
501 | eint->cur_mask = devm_kcalloc(dev: eint->dev, n: eint->hw->ports, |
502 | size: sizeof(*eint->cur_mask), GFP_KERNEL); |
503 | if (!eint->cur_mask) |
504 | return -ENOMEM; |
505 | |
506 | eint->dual_edge = devm_kcalloc(dev: eint->dev, n: eint->hw->ap_num, |
507 | size: sizeof(int), GFP_KERNEL); |
508 | if (!eint->dual_edge) |
509 | return -ENOMEM; |
510 | |
511 | eint->domain = irq_domain_add_linear(of_node: eint->dev->of_node, |
512 | size: eint->hw->ap_num, |
513 | ops: &irq_domain_simple_ops, NULL); |
514 | if (!eint->domain) |
515 | return -ENOMEM; |
516 | |
517 | if (eint->hw->db_time) { |
518 | for (i = 0; i < MTK_EINT_DBNC_MAX; i++) |
519 | if (eint->hw->db_time[i] == 0) |
520 | break; |
521 | eint->num_db_time = i; |
522 | } |
523 | |
524 | mtk_eint_hw_init(eint); |
525 | for (i = 0; i < eint->hw->ap_num; i++) { |
526 | int virq = irq_create_mapping(host: eint->domain, hwirq: i); |
527 | |
528 | irq_set_chip_and_handler(irq: virq, chip: &mtk_eint_irq_chip, |
529 | handle: handle_level_irq); |
530 | irq_set_chip_data(irq: virq, data: eint); |
531 | } |
532 | |
533 | irq_set_chained_handler_and_data(irq: eint->irq, handle: mtk_eint_irq_handler, |
534 | data: eint); |
535 | |
536 | return 0; |
537 | } |
538 | EXPORT_SYMBOL_GPL(mtk_eint_do_init); |
539 | |
540 | MODULE_LICENSE("GPL v2" ); |
541 | MODULE_DESCRIPTION("MediaTek EINT Driver" ); |
542 | |