This source file includes the following definitions.
- tc_get_cycles
- tc_get_cycles32
- tc_clksrc_suspend
- tc_clksrc_resume
- tc_sched_clock_read
- tc_sched_clock_read32
- tc_delay_timer_read
- tc_delay_timer_read32
- to_tc_clkevt
- tc_shutdown
- tc_set_oneshot
- tc_set_periodic
- tc_next_event
- ch2_irq
- setup_clkevents
- setup_clkevents
- tcb_setup_dual_chan
- tcb_setup_single_chan
- tcb_clksrc_init
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
#include <soc/at91/atmel_tcb.h>
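/*
 * Overview (reconstructed from the code below): a single TC block provides
 * both a clocksource and a clockevent device.
 *
 * - On 16-bit TC blocks, channel 0 and channel 1 are chained (TIOA0 feeding
 *   XC1) to form a free-running 32-bit counter used as the clocksource.
 * - On 32-bit TC blocks, a single channel already provides a 32-bit counter.
 * - Channel 2 provides the clockevent, clocked from the 32 KiHz slow clock.
 */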
static void __iomem *tcaddr;

/* per-channel register state saved by tc_clksrc_suspend()/tc_clksrc_resume() */
static struct
{
	u32 cmr;
	u32 imr;
	u32 rc;
	bool clken;
} tcb_cache[3];
static u32 bmr_cache;

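/*
 * Read the chained 32-bit counter: channel 1 holds the high half and
 * channel 0 the low half; re-read until the high half is stable so the
 * two halves are consistent.
 */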
static u64 tc_get_cycles(struct clocksource *cs)
{
	unsigned long flags;
	u32 lower, upper;

	raw_local_irq_save(flags);
	do {
		upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
		lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
	} while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));

	raw_local_irq_restore(flags);
	return (upper << 16) | lower;
}

static u64 tc_get_cycles32(struct clocksource *cs)
{
	return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}

static void tc_clksrc_suspend(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
		tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
		tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
		tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
					ATMEL_TC_CLKSTA);
	}

	bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}

static void tc_clksrc_resume(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		/* restore the channel mode and compare value; RA/RB are unused */
		writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
		writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
		writel(0, tcaddr + ATMEL_TC_REG(i, RA));
		writel(0, tcaddr + ATMEL_TC_REG(i, RB));
		/* disable all interrupts, then re-enable the saved ones */
		writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
		writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
		/* re-enable the channel clock if it was running */
		if (tcb_cache[i].clken)
			writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
	}

	/* restore the block mode (channel chaining) and restart all channels */
	writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct clocksource clksrc = {
	.rating			= 200,
	.read			= tc_get_cycles,
	.mask			= CLOCKSOURCE_MASK(32),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
	.suspend		= tc_clksrc_suspend,
	.resume			= tc_clksrc_resume,
};

static u64 notrace tc_sched_clock_read(void)
{
	return tc_get_cycles(&clksrc);
}

static u64 notrace tc_sched_clock_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

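/* timer-based delay loop (udelay) backed by the same free-running counter */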
static struct delay_timer tc_delay_timer;

static unsigned long tc_delay_timer_read(void)
{
	return tc_get_cycles(&clksrc);
}

static unsigned long notrace tc_delay_timer_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
	struct clock_event_device	clkevt;
	struct clk			*clk;
	void __iomem			*regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
	return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

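/*
 * The clockevent uses channel 2, clocked from the 32 KiHz slow clock.
 * timer_clock holds the TCCLKS index selecting that input; it is set to
 * clk32k_divisor_idx in setup_clkevents() below.
 */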
static u32 timer_clock;

static int tc_shutdown(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	/* mask channel 2 interrupts and stop its clock */
	writel(0xff, regs + ATMEL_TC_REG(2, IDR));
	writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
	if (!clockevent_state_detached(d))
		clk_disable(tcd->clk);

	return 0;
}

static int tc_set_oneshot(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	clk_enable(tcd->clk);

	/* count up to RC, then stop the clock and raise an interrupt */
	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
	       ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	return 0;
}

static int tc_set_periodic(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	clk_enable(tcd->clk);

	/* count up to RC, then restart and raise an interrupt */
	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
	       regs + ATMEL_TC_REG(2, CMR));
	/* one tick per jiffy at the 32 KiHz slow clock rate */
	writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

	/* enable the RC compare interrupt */
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* start the clock and trigger the channel */
	writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
	       ATMEL_TC_REG(2, CCR));
	return 0;
}

static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
	writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));

	/* start the clock and trigger the channel */
	writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
		       tcaddr + ATMEL_TC_REG(2, CCR));
	return 0;
}

static struct tc_clkevt_device clkevt = {
	.clkevt	= {
		.features		= CLOCK_EVT_FEAT_PERIODIC |
					  CLOCK_EVT_FEAT_ONESHOT,
		/* modest rating: a 16-bit channel clocked at 32 KiHz */
		.rating			= 125,
		.set_next_event		= tc_next_event,
		.set_state_shutdown	= tc_shutdown,
		.set_state_periodic	= tc_set_periodic,
		.set_state_oneshot	= tc_set_oneshot,
	},
};

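/* channel 2 interrupt handler: an RC compare (CPCS) status bit means a tick */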
static irqreturn_t ch2_irq(int irq, void *handle)
{
	struct tc_clkevt_device *dev = handle;
	unsigned int sr;

	sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
	if (sr & ATMEL_TC_CPCS) {
		dev->clkevt.event_handler(&dev->clkevt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	int ret;
	struct clk *t2_clk = tc->clk[2];
	int irq = tc->irq[2];

	ret = clk_prepare_enable(tc->slow_clk);
	if (ret)
		return ret;

	/* prepare channel 2's clock now; it is only enabled while in use */
	ret = clk_prepare_enable(t2_clk);
	if (ret) {
		clk_disable_unprepare(tc->slow_clk);
		return ret;
	}

	clk_disable(t2_clk);

	clkevt.regs = tc->regs;
	clkevt.clk = t2_clk;

	timer_clock = clk32k_divisor_idx;

	clkevt.clkevt.cpumask = cpumask_of(0);

	ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
	if (ret) {
		clk_unprepare(t2_clk);
		clk_disable_unprepare(tc->slow_clk);
		return ret;
	}

	/* the 16-bit channel counts at 32768 Hz */
	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);

	return ret;
}

#else

static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	/* nothing to do without CONFIG_GENERIC_CLOCKEVENTS */
	return 0;
}

#endif

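/*
 * Clocksource setup for 16-bit blocks: channel 0 sets TIOA0 at RA (0x0000)
 * and clears it at RC (0x8000), so channel 1, clocked by TIOA0 through XC1,
 * advances once per 2^16 counts of channel 0, giving a chained 32-bit counter.
 */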
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, TIOA0 set at RA, cleared at RC */
	writel(mck_divisor_idx
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP
			| ATMEL_TC_ACPA_SET
			| ATMEL_TC_ACPC_CLEAR,
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
	writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* channel 1: waveform mode, clocked by TIOA0 via external clock XC1 */
	writel(ATMEL_TC_XC1
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,
			tcaddr + ATMEL_TC_REG(1, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

	/* chain channel 0 to channel 1 */
	writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
	/* then reset and start all the channels together */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}


static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, free-running 32-bit counter */
	writel(mck_divisor_idx
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* then reset and start the channel */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

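/* TCCLKS divisors of the peripheral clock; the 0 entry selects the 32 KiHz slow clock */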
static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };

static const struct of_device_id atmel_tcb_of_match[] = {
	/* .data is the counter width, in bits */
	{ .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
	{ .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
	{ }
};

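/*
 * Probe: map the TC block, grab its clocks and interrupt, pick a suitable
 * divisor, set up the counter channels and register the clocksource,
 * clockevent, sched_clock and delay timer.
 */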
static int __init tcb_clksrc_init(struct device_node *node)
{
	struct atmel_tc tc;
	struct clk *t0_clk;
	const struct of_device_id *match;
	u64 (*tc_sched_clock)(void);
	u32 rate, divided_rate = 0;
	int best_divisor_idx = -1;
	int clk32k_divisor_idx = -1;
	int bits;
	int i;
	int ret;

	/* only one TC block is used; ignore any further matches */
	if (tcaddr)
		return 0;

	tc.regs = of_iomap(node->parent, 0);
	if (!tc.regs)
		return -ENXIO;

	t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
	if (IS_ERR(t0_clk))
		return PTR_ERR(t0_clk);

	tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
	if (IS_ERR(tc.slow_clk))
		return PTR_ERR(tc.slow_clk);

	/* channels without their own clock share t0_clk */
	tc.clk[0] = t0_clk;
	tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
	if (IS_ERR(tc.clk[1]))
		tc.clk[1] = t0_clk;
	tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
	if (IS_ERR(tc.clk[2]))
		tc.clk[2] = t0_clk;

	/* use channel 2's interrupt if present, else the block's first one */
	tc.irq[2] = of_irq_get(node->parent, 2);
	if (tc.irq[2] <= 0) {
		tc.irq[2] = of_irq_get(node->parent, 0);
		if (tc.irq[2] <= 0)
			return -EINVAL;
	}

	match = of_match_node(atmel_tcb_of_match, node->parent);
	bits = (uintptr_t)match->data;

	/* mask all interrupts on every channel before using the block */
	for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
		writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));

	ret = clk_prepare_enable(t0_clk);
	if (ret) {
		pr_debug("can't enable T0 clk\n");
		return ret;
	}

	/* choose the largest divisor that keeps the counter at 5 MHz or more */
	rate = (u32) clk_get_rate(t0_clk);
	for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
		unsigned divisor = atmel_tcb_divisors[i];
		unsigned tmp;

		/* remember the 32 KiHz entry for the clockevent channel */
		if (!divisor) {
			clk32k_divisor_idx = i;
			continue;
		}

		tmp = rate / divisor;
		pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
		if (best_divisor_idx > 0) {
			/* already have a candidate; only switch to rates >= 5 MHz */
			if (tmp < 5 * 1000 * 1000)
				continue;
		}
		divided_rate = tmp;
		best_divisor_idx = i;
	}

	clksrc.name = kbasename(node->parent->full_name);
	clkevt.clkevt.name = kbasename(node->parent->full_name);
	pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
			((divided_rate % 1000000) + 500) / 1000);

	tcaddr = tc.regs;

	if (bits == 32) {
		/* a single 32-bit channel is enough for the clocksource */
		clksrc.read = tc_get_cycles32;
		tcb_setup_single_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read32;
		tc_delay_timer.read_current_timer = tc_delay_timer_read32;
	} else {
		/*
		 * 16-bit channels: chain channels 0 and 1 into a 32-bit
		 * counter; channel 1 needs its own clock enabled.
		 */
		ret = clk_prepare_enable(tc.clk[1]);
		if (ret) {
			pr_debug("can't enable T1 clk\n");
			goto err_disable_t0;
		}

		tcb_setup_dual_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read;
		tc_delay_timer.read_current_timer = tc_delay_timer_read;
	}

	ret = clocksource_register_hz(&clksrc, divided_rate);
	if (ret)
		goto err_disable_t1;

	/* channel 2: periodic and oneshot timer support */
	ret = setup_clkevents(&tc, clk32k_divisor_idx);
	if (ret)
		goto err_unregister_clksrc;

	sched_clock_register(tc_sched_clock, 32, divided_rate);

	tc_delay_timer.freq = divided_rate;
	register_current_timer_delay(&tc_delay_timer);

	return 0;

err_unregister_clksrc:
	clocksource_unregister(&clksrc);

err_disable_t1:
	if (bits != 32)
		clk_disable_unprepare(tc.clk[1]);

err_disable_t0:
	clk_disable_unprepare(t0_clk);

	tcaddr = NULL;

	return ret;
}
TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);