// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MPIC timer driver
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <sysdev/fsl_soc.h>
#include <asm/io.h>

#include <asm/mpic_timer.h>

#define FSL_GLOBAL_TIMER		0x1

/* Clock Ratio
 * Divide by 64 0x00000300
 * Divide by 32 0x00000200
 * Divide by 16 0x00000100
 * Divide by  8 0x00000000 (Hardware default div)
 */
#define MPIC_TIMER_TCR_CLKDIV		0x00000300

#define MPIC_TIMER_TCR_ROVR_OFFSET	24

#define TIMER_STOP			0x80000000
#define GTCCR_TOG			0x80000000
#define TIMERS_PER_GROUP		4
#define MAX_TICKS			(~0U >> 1)
#define MAX_TICKS_CASCADE		(~0U)
#define TIMER_OFFSET(num)		(1 << (TIMERS_PER_GROUP - 1 - num))
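
/*
 * Note: TIMER_OFFSET() maps timer N to idle-bitmap bit
 * (TIMERS_PER_GROUP - 1 - N), so timer 3 owns bit 0 and timer 0 owns
 * bit 3. get_timer() relies on this reversed mapping when it scans
 * priv->idle from bit 0 upward while handing out the highest-numbered
 * free timer first.
 */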

struct timer_regs {
	u32	gtccr;
	u32	res0[3];
	u32	gtbcr;
	u32	res1[3];
	u32	gtvpr;
	u32	res2[3];
	u32	gtdr;
	u32	res3[3];
};
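
/*
 * Each per-timer register (GTCCR, GTBCR, GTVPR, GTDR) occupies the
 * first word of a 0x10-byte block; the res[3] padding keeps this
 * struct aligned with that layout, so priv->regs[num] indexes the
 * register block of timer 'num' directly.
 */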

struct cascade_priv {
	u32 tcr_value;			/* TCR register: CASC & ROVR value */
	unsigned int cascade_map;	/* cascade map */
	unsigned int timer_num;		/* cascade control timer */
};

struct timer_group_priv {
	struct timer_regs __iomem	*regs;
	struct mpic_timer		timer[TIMERS_PER_GROUP];
	struct list_head		node;
	unsigned int			timerfreq;
	unsigned int			idle;
	unsigned int			flags;
	spinlock_t			lock;
	void __iomem			*group_tcr;
};

static struct cascade_priv cascade_timer[] = {
	/* cascade timer 0 and 1 */
	{0x1, 0xc, 0x1},
	/* cascade timer 1 and 2 */
	{0x2, 0x6, 0x2},
	/* cascade timer 2 and 3 */
	{0x4, 0x3, 0x3}
};
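
/*
 * Each cascade_timer[] entry is {tcr_value, cascade_map, timer_num}:
 * tcr_value is written into both the CASC and ROVR fields of the
 * group TCR, cascade_map holds the idle-bitmap bits of both timers in
 * the pair (using the TIMER_OFFSET() numbering above), and timer_num
 * is the control timer that carries the high half of the count and
 * whose interrupt is requested.
 */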

static LIST_HEAD(timer_group_list);

static void convert_ticks_to_time(struct timer_group_priv *priv,
		const u64 ticks, time64_t *time)
{
	*time = (u64)div_u64(ticks, priv->timerfreq);
}

/* the time set by the user is converted to "ticks" */
static int convert_time_to_ticks(struct timer_group_priv *priv,
		time64_t time, u64 *ticks)
{
	u64 max_value;		/* prevent u64 overflow */

	max_value = div_u64(ULLONG_MAX, priv->timerfreq);

	if (time > max_value)
		return -EINVAL;

	*ticks = (u64)time * (u64)priv->timerfreq;

	return 0;
}

/* detect whether there is a cascade timer available */
static struct mpic_timer *detect_idle_cascade_timer(
					struct timer_group_priv *priv)
{
	struct cascade_priv *casc_priv;
	unsigned int map;
	unsigned int array_size = ARRAY_SIZE(cascade_timer);
	unsigned int num;
	unsigned int i;
	unsigned long flags;

	casc_priv = cascade_timer;
	for (i = 0; i < array_size; i++) {
		spin_lock_irqsave(&priv->lock, flags);
		map = casc_priv->cascade_map & priv->idle;
		if (map == casc_priv->cascade_map) {
			num = casc_priv->timer_num;
			priv->timer[num].cascade_handle = casc_priv;

			/* set timer busy */
			priv->idle &= ~casc_priv->cascade_map;
			spin_unlock_irqrestore(&priv->lock, flags);
			return &priv->timer[num];
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		casc_priv++;
	}

	return NULL;
}

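/*
 * set_cascade_timer() splits the requested tick count as
 * ticks = quotient * MAX_TICKS_CASCADE + remainder: the quotient is
 * written (stopped) into the base count register of the pair's
 * higher-numbered timer and the remainder into the lower-numbered
 * one, so together the pair covers counts beyond a single 32-bit
 * base count register.
 */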
static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
		unsigned int num)
{
	struct cascade_priv *casc_priv;
	u32 tcr;
	u32 tmp_ticks;
	u32 rem_ticks;

	/* set group tcr reg for cascade */
	casc_priv = priv->timer[num].cascade_handle;
	if (!casc_priv)
		return -EINVAL;

	tcr = casc_priv->tcr_value |
		(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
	setbits32(priv->group_tcr, tcr);

	tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);

	out_be32(&priv->regs[num].gtccr, 0);
	out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);

	out_be32(&priv->regs[num - 1].gtccr, 0);
	out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);

	return 0;
}

static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
					u64 ticks)
{
	struct mpic_timer *allocated_timer;

	/* Two cascade timers: Support the maximum time */
	const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
	int ret;

	if (ticks > max_ticks)
		return NULL;

	/* detect idle timer */
	allocated_timer = detect_idle_cascade_timer(priv);
	if (!allocated_timer)
		return NULL;

	/* set ticks to timer */
	ret = set_cascade_timer(priv, ticks, allocated_timer->num);
	if (ret < 0)
		return NULL;

	return allocated_timer;
}

static struct mpic_timer *get_timer(time64_t time)
{
	struct timer_group_priv *priv;
	struct mpic_timer *timer;

	u64 ticks;
	unsigned int num;
	unsigned int i;
	unsigned long flags;
	int ret;

	list_for_each_entry(priv, &timer_group_list, node) {
		ret = convert_time_to_ticks(priv, time, &ticks);
		if (ret < 0)
			return NULL;

		if (ticks > MAX_TICKS) {
			if (!(priv->flags & FSL_GLOBAL_TIMER))
				return NULL;

			timer = get_cascade_timer(priv, ticks);
			if (!timer)
				continue;

			return timer;
		}

		for (i = 0; i < TIMERS_PER_GROUP; i++) {
			/* one timer: Reverse allocation */
			num = TIMERS_PER_GROUP - 1 - i;
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->idle & (1 << i)) {
				/* set timer busy */
				priv->idle &= ~(1 << i);
				/* set ticks & stop timer */
				out_be32(&priv->regs[num].gtbcr,
					ticks | TIMER_STOP);
				out_be32(&priv->regs[num].gtccr, 0);
				priv->timer[num].cascade_handle = NULL;
				spin_unlock_irqrestore(&priv->lock, flags);
				return &priv->timer[num];
			}
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	return NULL;
}

/**
 * mpic_start_timer - start hardware timer
 * @handle: the timer to be started.
 *
 * Once started, the handler registered for this timer, ->fn(->dev),
 * is called from the hardware interrupt when the requested time has
 * elapsed.
 */
void mpic_start_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
}
EXPORT_SYMBOL(mpic_start_timer);

/**
 * mpic_stop_timer - stop hardware timer
 * @handle: the timer to be stopped
 *
 * The timer periodically generates an interrupt until the user stops
 * it; this stops the timer and clears its current count.
 */
void mpic_stop_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
		out_be32(&priv->regs[handle->num].gtccr, 0);
		out_be32(&priv->regs[handle->num - 1].gtccr, 0);
	} else {
		out_be32(&priv->regs[handle->num].gtccr, 0);
	}
}
EXPORT_SYMBOL(mpic_stop_timer);

/**
 * mpic_get_remain_time - get timer time
 * @handle: the timer to be queried.
 * @time: returns the remaining time, in seconds
 *
 * Query how much time remains before the timer expires.
 */
void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	u64 ticks;
	u32 tmp_ticks;

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
		tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
		tmp_ticks &= ~GTCCR_TOG;
		ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
		tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
		ticks += tmp_ticks;
	} else {
		ticks = in_be32(&priv->regs[handle->num].gtccr);
		ticks &= ~GTCCR_TOG;
	}

	convert_ticks_to_time(priv, ticks, time);
}
EXPORT_SYMBOL(mpic_get_remain_time);

/**
 * mpic_free_timer - free hardware timer
 * @handle: the timer to be removed.
 *
 * Stop the timer, free its interrupt and mark it idle again.
 *
 * Note: cannot be used in interrupt context.
 */
void mpic_free_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	struct cascade_priv *casc_priv;
	unsigned long flags;

	mpic_stop_timer(handle);

	casc_priv = priv->timer[handle->num].cascade_handle;

	free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);

	spin_lock_irqsave(&priv->lock, flags);
	if (casc_priv) {
		u32 tcr;
		tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
					MPIC_TIMER_TCR_ROVR_OFFSET);
		clrbits32(priv->group_tcr, tcr);
		priv->idle |= casc_priv->cascade_map;
		priv->timer[handle->num].cascade_handle = NULL;
	} else {
		priv->idle |= TIMER_OFFSET(handle->num);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(mpic_free_timer);

/**
 * mpic_request_timer - get a hardware timer
 * @fn: interrupt handler function
 * @dev: passed back to @fn as its dev_id argument
 * @time: time until the timer expires, in seconds
 *
 * Allocate a free timer, program it with @time and request its
 * interrupt via request_irq(). Returns a timer handle on success,
 * or NULL on failure.
 */
struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
				      time64_t time)
{
	struct mpic_timer *allocated_timer;
	int ret;

	if (list_empty(&timer_group_list))
		return NULL;

	if (time < 0)
		return NULL;

	allocated_timer = get_timer(time);
	if (!allocated_timer)
		return NULL;

	ret = request_irq(allocated_timer->irq, fn,
			IRQF_TRIGGER_LOW, "global-timer", dev);
	if (ret) {
		mpic_free_timer(allocated_timer);
		return NULL;
	}

	allocated_timer->dev = dev;

	return allocated_timer;
}
EXPORT_SYMBOL(mpic_request_timer);
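
/*
 * Typical usage (illustrative sketch only; my_timeout_handler and
 * my_dev are hypothetical caller-side names, not part of this driver):
 *
 *	static irqreturn_t my_timeout_handler(int irq, void *dev_id)
 *	{
 *		// dev_id is the 'dev' cookie given to mpic_request_timer()
 *		return IRQ_HANDLED;
 *	}
 *
 *	struct mpic_timer *t;
 *
 *	t = mpic_request_timer(my_timeout_handler, my_dev, 30);
 *	if (!t)
 *		return -ENODEV;
 *	mpic_start_timer(t);	// handler runs after ~30 seconds
 *	...
 *	mpic_stop_timer(t);	// optional; mpic_free_timer() also stops it
 *	mpic_free_timer(t);	// must not be called from interrupt context
 */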

static int __init timer_group_get_freq(struct device_node *np,
			struct timer_group_priv *priv)
{
	u32 div;

	if (priv->flags & FSL_GLOBAL_TIMER) {
		struct device_node *dn;

		dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
		if (dn) {
			of_property_read_u32(dn, "clock-frequency",
					&priv->timerfreq);
			of_node_put(dn);
		}
	}

	if (!priv->timerfreq)
		return -EINVAL;

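	/*
	 * With MPIC_TIMER_TCR_CLKDIV = 0x300 the effective divisor is
	 * (1 << (0x300 >> 8)) * 8 = (1 << 3) * 8 = 64, matching the
	 * "Divide by 64" entry in the clock-ratio table above.
	 */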
	if (priv->flags & FSL_GLOBAL_TIMER) {
		div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
		priv->timerfreq /= div;
	}

	return 0;
}

static int __init timer_group_get_irq(struct device_node *np,
		struct timer_group_priv *priv)
{
	const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
	const u32 *p;
	u32 offset;
	u32 count;

	unsigned int i;
	unsigned int j;
	unsigned int irq_index = 0;
	unsigned int irq;
	int len;

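	/*
	 * The optional "fsl,available-ranges" property is a list of
	 * <offset count> pairs describing which timers in the group may
	 * be used, e.g. <1 3> for timers 1..3 (hypothetical example);
	 * when absent, all TIMERS_PER_GROUP timers are available.
	 */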
	p = of_get_property(np, "fsl,available-ranges", &len);
	if (p && len % (2 * sizeof(u32)) != 0) {
		pr_err("%pOF: malformed available-ranges property.\n", np);
		return -EINVAL;
	}

	if (!p) {
		p = all_timer;
		len = sizeof(all_timer);
	}

	len /= 2 * sizeof(u32);

	for (i = 0; i < len; i++) {
		offset = p[i * 2];
		count = p[i * 2 + 1];
		for (j = 0; j < count; j++) {
			irq = irq_of_parse_and_map(np, irq_index);
			if (!irq) {
				pr_err("%pOF: irq parse and map failed.\n", np);
				return -EINVAL;
			}

			/* Set timer idle */
			priv->idle |= TIMER_OFFSET((offset + j));
			priv->timer[offset + j].irq = irq;
			priv->timer[offset + j].num = offset + j;
			irq_index++;
		}
	}

	return 0;
}

static void __init timer_group_init(struct device_node *np)
{
	struct timer_group_priv *priv;
	unsigned int i = 0;
	int ret;

	priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
	if (!priv) {
		pr_err("%pOF: cannot allocate memory for group.\n", np);
		return;
	}

	if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
		priv->flags |= FSL_GLOBAL_TIMER;

	priv->regs = of_iomap(np, i++);
	if (!priv->regs) {
		pr_err("%pOF: cannot ioremap timer register address.\n", np);
		goto out;
	}

	if (priv->flags & FSL_GLOBAL_TIMER) {
		priv->group_tcr = of_iomap(np, i++);
		if (!priv->group_tcr) {
			pr_err("%pOF: cannot ioremap tcr address.\n", np);
			goto out;
		}
	}

	ret = timer_group_get_freq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer frequency.\n", np);
		goto out;
	}

	ret = timer_group_get_irq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer irqs.\n", np);
		goto out;
	}

	spin_lock_init(&priv->lock);

	/* Init FSL timer hardware */
	if (priv->flags & FSL_GLOBAL_TIMER)
		setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);

	list_add_tail(&priv->node, &timer_group_list);

	return;

out:
	if (priv->regs)
		iounmap(priv->regs);

	if (priv->group_tcr)
		iounmap(priv->group_tcr);

	kfree(priv);
}

static void mpic_timer_resume(void)
{
	struct timer_group_priv *priv;

	list_for_each_entry(priv, &timer_group_list, node) {
		/* Init FSL timer hardware */
		if (priv->flags & FSL_GLOBAL_TIMER)
			setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
	}
}

static const struct of_device_id mpic_timer_ids[] = {
	{ .compatible = "fsl,mpic-global-timer", },
	{},
};

static struct syscore_ops mpic_timer_syscore_ops = {
	.resume = mpic_timer_resume,
};

static int __init mpic_timer_init(void)
{
	struct device_node *np = NULL;

	for_each_matching_node(np, mpic_timer_ids)
		timer_group_init(np);

	register_syscore_ops(&mpic_timer_syscore_ops);

	if (list_empty(&timer_group_list))
		return -ENODEV;

	return 0;
}
subsys_initcall(mpic_timer_init);