// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif

enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;
	unsigned long rate;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)

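/*
 * The shared start/stop register (TSTR) is 8 bits wide and lives in the
 * device's common register block, while the per-channel TCOR/TCNT/TCR
 * registers are laid out at 4-byte intervals in each channel's block.
 * TCR is accessed as a 16-bit register, TCOR and TCNT as 32-bit ones.
 */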
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

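/*
 * Reference-counted channel enable. The first user powers the device up
 * through runtime PM and marks it as a syscore device so it is handled
 * outside the regular device suspend/resume path.
 */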
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

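/*
 * (Re)program the channel: TCNT holds the initial count and TCOR the
 * reload value. In one-shot mode TCOR is set to the maximum so the
 * counter keeps running harmlessly after the event has fired. The
 * underflow interrupt is enabled and the channel restarted.
 */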
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}

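/*
 * Underflow interrupt handler. In one-shot mode the TCR write clears both
 * the underflow flag and the interrupt enable bit; in periodic mode only
 * the underflow flag is cleared so further interrupts keep coming.
 */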
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}

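/*
 * TCNT counts down from 0xffffffff, so invert the value to present the
 * clocksource core with a monotonically increasing counter.
 */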
static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret)
		ch->cs_enabled = true;

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

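/*
 * System suspend drops the channel's enable reference and powers the
 * device down through its PM domain; resume restores it. Both are no-ops
 * while the clocksource is not in use.
 */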
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		dev_pm_genpd_resume(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->tmu->rate);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}

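/*
 * Start the channel for clock events. For periodic operation the reload
 * value is the timer rate divided by HZ, rounded to the nearest count.
 */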
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	sh_tmu_enable(ch);

	if (periodic) {
		ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);
	return 0;
}

static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}

static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	dev_pm_genpd_suspend(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	dev_pm_genpd_resume(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}

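/*
 * The shared TSTR register occupies the first bytes of the register block
 * (offset 2 on SH3, offset 4 otherwise); per-channel register blocks
 * follow at 12-byte intervals, starting at offset 4 on SH3-style TMUs and
 * at offset 8 on the standard variant.
 */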
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0)
		return ch->irq;

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

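/*
 * When probed from DT, default to the standard TMU model with three
 * channels; the optional #renesas,channels property may override this,
 * but only two or three channels are accepted.
 */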
static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}

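/*
 * Common setup: determine the configuration from DT or platform data, get
 * and prepare the functional clock, compute the timer rate (the input
 * clock divided by 4, matching the TCR_TPSC_CLK4 prescaler used
 * throughout), map the registers and set up the channels.
 */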
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine clock rate. */
	ret = clk_enable(tmu->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	tmu->rate = clk_get_rate(tmu->clk) / 4;
	clk_disable(tmu->clk);

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}

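/*
 * On SuperH the driver can also be registered as an early platform device
 * ("earlytimer"). In that case probe() runs a second time once the normal
 * platform bus comes up and only keeps the already initialised device.
 */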
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_sh_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe = sh_tmu_probe,
	.driver = {
		.name = "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
		.suppress_bind_attrs = true,
	},
	.id_table = sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
#endif

subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");