// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

/* ARC700 has two 32-bit independent programmable timers: TIMER0 and TIMER1.
 * Each can be programmed to count from @count to @limit and optionally raise
 * an interrupt. We've designated TIMER0 for clockevents and TIMER1 for
 * clocksource.
 *
 * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP)
 * which are suitable for UP and SMP based clocksources respectively.
 */

#include <linux/interrupt.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

#include <soc/arc/timers.h>
#include <soc/arc/mcip.h>


static unsigned long arc_timer_freq;

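/*
 * Fetch the timer input clock from DT, enable it and cache its rate in
 * arc_timer_freq for the clocksource/clockevent registrations below.
 */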
static int noinline arc_get_timer_clk(struct device_node *node)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("timer missing clk\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Couldn't enable parent clk\n");
		return ret;
	}

	arc_timer_freq = clk_get_rate(clk);

	return 0;
}

/********** Clock Source Device *********/

#ifdef CONFIG_ARC_TIMERS_64BIT

static u64 arc_read_gfrc(struct clocksource *cs)
{
	unsigned long flags;
	u32 l, h;

	/*
	 * From a programming model pov, there seems to be just one instance of
	 * MCIP_CMD/MCIP_READBACK however micro-architecturally there's
	 * an instance PER ARC CORE (not per cluster), and there is dedicated
	 * hardware decode logic (per core) inside ARConnect to handle
	 * simultaneous read/write accesses from cores via those two registers.
	 * So several concurrent commands to ARConnect are OK if they are
	 * trying to access two different sub-components (like GFRC,
	 * inter-core interrupt, etc...). HW also supports simultaneously
	 * accessing GFRC by multiple cores.
	 * That's why it is safe to disable hard interrupts on the local CPU
	 * before access to GFRC instead of taking global MCIP spinlock
	 * defined in arch/arc/kernel/mcip.c
	 */
	local_irq_save(flags);

	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	l = read_aux_reg(ARC_REG_MCIP_READBACK);

	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	h = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return (((u64)h) << 32) | l;
}

static notrace u64 arc_gfrc_clock_read(void)
{
	return arc_read_gfrc(NULL);
}

static struct clocksource arc_counter_gfrc = {
	.name = "ARConnect GFRC",
	.rating = 400,
	.read = arc_read_gfrc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

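/*
 * Probe the ARConnect Global Free Running Counter: bail out if the MCIP BCR
 * says it is absent, otherwise register it as clocksource and sched_clock.
 */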
static int __init arc_cs_setup_gfrc(struct device_node *node)
{
	struct mcip_bcr mp;
	int ret;

	READ_BCR(ARC_REG_MCIP_BCR, mp);
	if (!mp.gfrc) {
		pr_warn("Global-64-bit-Ctr clocksource not detected\n");
		return -ENXIO;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);

#define AUX_RTC_CTRL	0x103
#define AUX_RTC_LOW	0x104
#define AUX_RTC_HIGH	0x105

static u64 arc_read_rtc(struct clocksource *cs)
{
	unsigned long status;
	u32 l, h;

	/*
	 * hardware has an internal state machine which tracks readout of
	 * low/high and updates the CTRL.status if
	 *  - interrupt/exception taken between the two reads
	 *  - high increments after low has been read
	 */
	do {
		l = read_aux_reg(AUX_RTC_LOW);
		h = read_aux_reg(AUX_RTC_HIGH);
		status = read_aux_reg(AUX_RTC_CTRL);
	} while (!(status & BIT(31)));

	return (((u64)h) << 32) | l;
}

static notrace u64 arc_rtc_clock_read(void)
{
	return arc_read_rtc(NULL);
}

static struct clocksource arc_counter_rtc = {
	.name = "ARCv2 RTC",
	.rating = 350,
	.read = arc_read_rtc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

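/*
 * Probe the per-core 64-bit RTC: usable as clocksource/sched_clock only on
 * UP since it is local to the CPU.
 */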
static int __init arc_cs_setup_rtc(struct device_node *node)
{
	struct bcr_timer timer;
	int ret;

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	if (!timer.rtc) {
		pr_warn("Local-64-bit-Ctr clocksource not detected\n");
		return -ENXIO;
	}

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP)) {
		pr_warn("Local-64-bit-Ctr not usable in SMP\n");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

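	/* Enable the free-running RTC counter */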
	write_aux_reg(AUX_RTC_CTRL, 1);

	sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);

#endif

/*
 * 32-bit TIMER1 keeps counting monotonically and wraps around
 */

static u64 arc_read_timer1(struct clocksource *cs)
{
	return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
}

static notrace u64 arc_timer1_clock_read(void)
{
	return arc_read_timer1(NULL);
}

static struct clocksource arc_counter_timer1 = {
	.name = "ARC Timer1",
	.rating = 300,
	.read = arc_read_timer1,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_cs_setup_timer1(struct device_node *node)
{
	int ret;

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP))
		return -EINVAL;

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

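	/* Free-run TIMER1 over the full 32-bit range; count only while the CPU is not halted */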
	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	write_aux_reg(ARC_REG_TIMER1_CTRL, ARC_TIMER_CTRL_NH);

	sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}

/********** Clock Event Device *********/

static int arc_timer_irq;

/*
 * Arm the timer to interrupt after @cycles
 * The distinction for oneshot/periodic is done in timer_irq_handler() below
 */
static void arc_timer_event_setup(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */

	write_aux_reg(ARC_REG_TIMER0_CTRL, ARC_TIMER_CTRL_IE | ARC_TIMER_CTRL_NH);
}


static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}

static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
	/*
	 * At X Hz, 1 sec = 1000ms -> X cycles;
	 * 10ms -> X / 100 cycles
	 */
	arc_timer_event_setup(arc_timer_freq / HZ);
	return 0;
}

static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name = "ARC Timer0",
	.features = CLOCK_EVT_FEAT_ONESHOT |
		    CLOCK_EVT_FEAT_PERIODIC,
	.rating = 300,
	.set_next_event = arc_clkevent_set_next_event,
	.set_state_periodic = arc_clkevent_set_periodic,
};

static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * 1. ACK the interrupt
	 *    - For ARC700, any write to CTRL reg ACKs it, so just rewrite
	 *      the [N]ot [H]alted bit.
	 *    - For HS3x, it is a bit subtle. On taken count-down interrupt,
	 *      IP bit [3] is set, which needs to be cleared for ACK'ing.
	 *      The write below can only update the other two bits, hence
	 *      it explicitly clears the IP bit.
	 * 2. Re-arm interrupt if periodic by writing to IE bit [0]
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | ARC_TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}


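/*
 * CPU hotplug "starting" callback: register this CPU's clockevent device and
 * enable its per-cpu TIMER0 interrupt (the boot CPU takes this path too).
 */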
static int arc_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
	enable_percpu_irq(arc_timer_irq, 0);
	return 0;
}

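/* CPU hotplug teardown callback: mask this CPU's timer interrupt */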
static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);
	return 0;
}

/*
 * clockevent setup for boot CPU
 */
static int __init arc_clockevent_setup(struct device_node *node)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0) {
		pr_err("clockevent: missing irq\n");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret) {
		pr_err("clockevent: unable to request irq\n");
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"clockevents/arc/timer:starting",
				arc_timer_starting_cpu,
				arc_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state\n");
		return ret;
	}
	return 0;
}

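/*
 * All "snps,arc-timer" nodes share one compatible: the first node probed sets
 * up the TIMER0 clockevent, any later node sets up the TIMER1 clocksource.
 */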
static int __init arc_of_timer_init(struct device_node *np)
{
	static int init_count = 0;
	int ret;

	if (!init_count) {
		init_count = 1;
		ret = arc_clockevent_setup(np);
	} else {
		ret = arc_cs_setup_timer1(np);
	}

	return ret;
}
TIMER_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);