// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "riscv-imsic: " fmt
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <asm/hwcap.h>

#include "irq-riscv-imsic-state.h"

#define IMSIC_DISABLE_EIDELIVERY		0
#define IMSIC_ENABLE_EIDELIVERY			1
#define IMSIC_DISABLE_EITHRESHOLD		1
#define IMSIC_ENABLE_EITHRESHOLD		0

static inline void imsic_csr_write(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	csr_write(CSR_IREG, val);
}

static inline unsigned long imsic_csr_read(unsigned long reg)
{
	csr_write(CSR_ISELECT, reg);
	return csr_read(CSR_IREG);
}

static inline unsigned long imsic_csr_read_clear(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	return csr_read_clear(CSR_IREG, val);
}

static inline void imsic_csr_set(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	csr_set(CSR_IREG, val);
}

static inline void imsic_csr_clear(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	csr_clear(CSR_IREG, val);
}
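
/*
 * Illustrative sketch (not part of the driver logic): the helpers above
 * implement the AIA indirect CSR access sequence. For example, reading the
 * first XLEN interrupt-enable bits boils down to:
 *
 *	csr_write(CSR_ISELECT, IMSIC_EIE0);
 *	val = csr_read(CSR_IREG);
 *
 * Because the ISELECT write and the IREG access must pair up, callers are
 * expected to run with preemption (or local IRQs) disabled.
 */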

struct imsic_priv *imsic;

const struct imsic_global_config *imsic_get_global_config(void)
{
	return imsic ? &imsic->global : NULL;
}
EXPORT_SYMBOL_GPL(imsic_get_global_config);

static bool __imsic_eix_read_clear(unsigned long id, bool pend)
{
	unsigned long isel, imask;

	isel = id / BITS_PER_LONG;
	isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
	isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
	imask = BIT(id & (__riscv_xlen - 1));

	return !!(imsic_csr_read_clear(isel, imask) & imask);
}

static inline bool __imsic_id_read_clear_enabled(unsigned long id)
{
	return __imsic_eix_read_clear(id, false);
}

static inline bool __imsic_id_read_clear_pending(unsigned long id)
{
	return __imsic_eix_read_clear(id, true);
}

void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val)
{
	unsigned long id = base_id, last_id = base_id + num_id;
	unsigned long i, isel, ireg;

	while (id < last_id) {
		isel = id / BITS_PER_LONG;
		isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
		isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;

		/*
		 * Prepare the ID mask to be programmed in the
		 * IMSIC EIEx and EIPx registers. These registers
		 * are XLEN-wide and we must not touch IDs which
		 * are < base_id and >= (base_id + num_id).
		 */
		ireg = 0;
		for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) {
			ireg |= BIT(i);
			id++;
		}

		/*
		 * The IMSIC EIEx and EIPx registers are indirectly
		 * accessed using the ISELECT and IREG CSRs, so we
		 * need to access these CSRs without getting preempted.
		 *
		 * All existing users of this function call this
		 * function with local IRQs disabled so we don't
		 * need to do anything special here.
		 */
		if (val)
			imsic_csr_set(isel, ireg);
		else
			imsic_csr_clear(isel, ireg);
	}
}
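
/*
 * Worked example (illustrative only): on RV64, BITS_PER_LONG is 64 and
 * IMSIC_EIPx_BITS is 32, so one 64-bit IREG access covers a pair of 32-bit
 * EIPx/EIEx registers. For ID 100, the pending bit is found at:
 *
 *	isel = (100 / 64) * (64 / 32) + IMSIC_EIP0 = IMSIC_EIP0 + 2
 *	mask = BIT(100 & 63) = BIT(36)
 */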

static void __imsic_local_sync(struct imsic_local_priv *lpriv)
{
	struct imsic_local_config *mlocal;
	struct imsic_vector *vec, *mvec;
	int i;

	lockdep_assert_held(&lpriv->lock);

	for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) {
		if (!i || i == IMSIC_IPI_ID)
			goto skip;
		vec = &lpriv->vectors[i];

		if (READ_ONCE(vec->enable))
			__imsic_id_set_enable(i);
		else
			__imsic_id_clear_enable(i);

		/*
		 * If the ID was being moved to a new ID on some other CPU
		 * then we can get an MSI during the movement, so check the
		 * ID pending bit and re-trigger the new ID on the other
		 * CPU using an MMIO write.
		 */
		mvec = READ_ONCE(vec->move);
		WRITE_ONCE(vec->move, NULL);
		if (mvec && mvec != vec) {
			if (__imsic_id_read_clear_pending(i)) {
				mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
				writel_relaxed(mvec->local_id, mlocal->msi_va);
			}

			imsic_vector_free(&lpriv->vectors[i]);
		}

skip:
		bitmap_clear(lpriv->dirty_bitmap, i, 1);
	}
}
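
/*
 * Note on the re-trigger above: per the AIA specification, writing the
 * 32-bit identity N to an IMSIC MSI page sets the pending bit for ID N on
 * that hart. An MSI lost in flight during a move can therefore be replayed
 * on the new target CPU with a single writel_relaxed() to its MSI page.
 */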

void imsic_local_sync_all(void)
{
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
	unsigned long flags;

	raw_spin_lock_irqsave(&lpriv->lock, flags);
	bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
	__imsic_local_sync(lpriv);
	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}

void imsic_local_delivery(bool enable)
{
	if (enable) {
		imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD);
		imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY);
		return;
	}

	imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY);
	imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD);
}
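
/*
 * Illustrative note: EIDELIVERY is a plain on/off switch, while a non-zero
 * EITHRESHOLD value P masks all IDs >= P. A threshold of 1 therefore blocks
 * every usable ID (ID 0 never exists) and a threshold of 0 disables the
 * threshold check entirely, which is what the
 * IMSIC_{ENABLE,DISABLE}_EITHRESHOLD values above encode.
 */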

#ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer)
{
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
	unsigned long flags;

	raw_spin_lock_irqsave(&lpriv->lock, flags);
	__imsic_local_sync(lpriv);
	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}

static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
{
	lockdep_assert_held(&lpriv->lock);

	/*
	 * The spinlock acquire/release semantics ensure that changes
	 * to vector enable, vector move and dirty bitmap are visible
	 * to the target CPU.
	 */

	/*
	 * We schedule a timer on the target CPU if the target CPU is
	 * not the same as the current CPU. An offline CPU will
	 * unconditionally synchronize IDs through imsic_starting_cpu()
	 * when the CPU is brought up.
	 */
	if (cpu_online(cpu)) {
		if (cpu == smp_processor_id()) {
			__imsic_local_sync(lpriv);
			return;
		}

		if (!timer_pending(&lpriv->timer)) {
			lpriv->timer.expires = jiffies + 1;
			add_timer_on(&lpriv->timer, cpu);
		}
	}
}
#else
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
{
	lockdep_assert_held(&lpriv->lock);
	__imsic_local_sync(lpriv);
}
#endif
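
/*
 * Design note: the IMSIC state CSRs are strictly per-hart, so one CPU
 * cannot program another CPU's EIEx/EIPx registers directly. Cross-CPU
 * updates are therefore lazy: the vector is marked dirty under lpriv->lock
 * and the target CPU is nudged with a pinned timer (one jiffy out) whose
 * callback replays the dirty bitmap through __imsic_local_sync().
 */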

void imsic_vector_mask(struct imsic_vector *vec)
{
	struct imsic_local_priv *lpriv;

	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
		return;

	/*
	 * This function is called through the Linux irq subsystem with
	 * irqs disabled so there is no need to save/restore irq flags.
	 */

	raw_spin_lock(&lpriv->lock);

	WRITE_ONCE(vec->enable, false);
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	__imsic_remote_sync(lpriv, vec->cpu);

	raw_spin_unlock(&lpriv->lock);
}

void imsic_vector_unmask(struct imsic_vector *vec)
{
	struct imsic_local_priv *lpriv;

	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
		return;

	/*
	 * This function is called through the Linux irq subsystem with
	 * irqs disabled so there is no need to save/restore irq flags.
	 */

	raw_spin_lock(&lpriv->lock);

	WRITE_ONCE(vec->enable, true);
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	__imsic_remote_sync(lpriv, vec->cpu);

	raw_spin_unlock(&lpriv->lock);
}

static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
				     bool new_enable, struct imsic_vector *new_move)
{
	unsigned long flags;
	bool enabled;

	raw_spin_lock_irqsave(&lpriv->lock, flags);

	/* Update enable and move details */
	enabled = READ_ONCE(vec->enable);
	WRITE_ONCE(vec->enable, new_enable);
	WRITE_ONCE(vec->move, new_move);

	/* Mark the vector as dirty and synchronize */
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	__imsic_remote_sync(lpriv, vec->cpu);

	raw_spin_unlock_irqrestore(&lpriv->lock, flags);

	return enabled;
}

void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec)
{
	struct imsic_local_priv *old_lpriv, *new_lpriv;
	bool enabled;

	if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu))
		return;

	old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu);
	if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec))
		return;

	new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu);
	if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec))
		return;

	/*
	 * Move and re-trigger the new vector based on the pending
	 * state of the old vector because we might get a device
	 * interrupt on the old vector while the device was being
	 * moved to the new vector.
	 */
	enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
	imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
}
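
/*
 * Example flow (illustrative only): moving a vector from CPU0/ID5 to
 * CPU2/ID7 first disables CPU0/ID5 with its ->move pointing at CPU2/ID7,
 * then enables CPU2/ID7 if the old vector was enabled. When CPU0 next runs
 * __imsic_local_sync(), a pending interrupt on ID5 is replayed by writing
 * 7 to CPU2's MSI page and the old vector is freed.
 */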

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind)
{
	struct imsic_local_priv *lpriv;
	struct imsic_vector *mvec;
	bool is_enabled;

	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
		return;

	is_enabled = imsic_vector_isenabled(vec);
	mvec = imsic_vector_get_move(vec);

	seq_printf(m, "%*starget_cpu      : %5u\n", ind, "", vec->cpu);
	seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id);
	seq_printf(m, "%*sis_reserved     : %5u\n", ind, "",
		   (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
	seq_printf(m, "%*sis_enabled      : %5u\n", ind, "", is_enabled ? 1 : 0);
	seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ? 1 : 0);
	if (mvec) {
		seq_printf(m, "%*smove_cpu        : %5u\n", ind, "", mvec->cpu);
		seq_printf(m, "%*smove_local_id   : %5u\n", ind, "", mvec->local_id);
	}
}

void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
{
	irq_matrix_debug_show(m, imsic->matrix, ind);
}
#endif

struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id)
{
	struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu);

	if (!lpriv || imsic->global.nr_ids < local_id)
		return NULL;

	return &lpriv->vectors[local_id];
}

struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
{
	struct imsic_vector *vec = NULL;
	struct imsic_local_priv *lpriv;
	unsigned long flags;
	unsigned int cpu;
	int local_id;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
	if (local_id < 0)
		return NULL;

	lpriv = per_cpu_ptr(imsic->lpriv, cpu);
	vec = &lpriv->vectors[local_id];
	vec->hwirq = hwirq;
	vec->enable = false;
	vec->move = NULL;

	return vec;
}
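
/*
 * Usage sketch (hypothetical caller, not part of this file): the IMSIC MSI
 * domain code is expected to pair these helpers roughly as:
 *
 *	vec = imsic_vector_alloc(hwirq, cpu_online_mask);
 *	if (!vec)
 *		return -ENOSPC;
 *	...
 *	imsic_vector_free(vec);
 *
 * where the matrix allocator picks the least-loaded CPU from the mask.
 */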

void imsic_vector_free(struct imsic_vector *vec)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	vec->hwirq = UINT_MAX;
	irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}

static void __init imsic_local_cleanup(void)
{
	struct imsic_local_priv *lpriv;
	int cpu;

	for_each_possible_cpu(cpu) {
		lpriv = per_cpu_ptr(imsic->lpriv, cpu);

		bitmap_free(lpriv->dirty_bitmap);
		kfree(lpriv->vectors);
	}

	free_percpu(imsic->lpriv);
}

static int __init imsic_local_init(void)
{
	struct imsic_global_config *global = &imsic->global;
	struct imsic_local_priv *lpriv;
	struct imsic_vector *vec;
	int cpu, i;

	/* Allocate per-CPU private state */
	imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
	if (!imsic->lpriv)
		return -ENOMEM;

	/* Setup per-CPU private state */
	for_each_possible_cpu(cpu) {
		lpriv = per_cpu_ptr(imsic->lpriv, cpu);

		raw_spin_lock_init(&lpriv->lock);

		/* Allocate dirty bitmap */
		lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL);
		if (!lpriv->dirty_bitmap)
			goto fail_local_cleanup;

#ifdef CONFIG_SMP
		/* Setup lazy timer for synchronization */
		timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED);
#endif

		/* Allocate vector array */
		lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors),
					 GFP_KERNEL);
		if (!lpriv->vectors)
			goto fail_local_cleanup;

		/* Setup vector array */
		for (i = 0; i <= global->nr_ids; i++) {
			vec = &lpriv->vectors[i];
			vec->cpu = cpu;
			vec->local_id = i;
			vec->hwirq = UINT_MAX;
		}
	}

	return 0;

fail_local_cleanup:
	imsic_local_cleanup();
	return -ENOMEM;
}

void imsic_state_online(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	irq_matrix_online(imsic->matrix);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}

void imsic_state_offline(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	irq_matrix_offline(imsic->matrix);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);

#ifdef CONFIG_SMP
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);

	raw_spin_lock_irqsave(&lpriv->lock, flags);
	WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0);
	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
#endif
}

static int __init imsic_matrix_init(void)
{
	struct imsic_global_config *global = &imsic->global;

	raw_spin_lock_init(&imsic->matrix_lock);
	imsic->matrix = irq_alloc_matrix(global->nr_ids + 1,
					 0, global->nr_ids + 1);
	if (!imsic->matrix)
		return -ENOMEM;

	/* Reserve ID#0 because it is special and never implemented */
	irq_matrix_assign_system(imsic->matrix, 0, false);

	/* Reserve IPI ID because it is special and used internally */
	irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false);

	return 0;
}
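
/*
 * Note: the matrix tracks IDs 0 to nr_ids inclusive, which is why it is
 * sized nr_ids + 1 above. ID 0 is never implemented by the hardware and
 * IMSIC_IPI_ID is claimed for IPIs, so both are assigned as system vectors
 * and are never handed out for device MSIs.
 */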

static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode,
					  u32 index, unsigned long *hartid)
{
	struct of_phandle_args parent;
	int rc;

	/*
	 * Currently, only an OF fwnode is supported; extend this
	 * function to add ACPI support.
	 */
	if (!is_of_node(fwnode))
		return -EINVAL;

	rc = of_irq_parse_one(to_of_node(fwnode), index, &parent);
	if (rc)
		return rc;

	/*
	 * Skip interrupts other than external interrupts for
	 * the current privilege level.
	 */
	if (parent.args[0] != RV_IRQ_EXT)
		return -EINVAL;

	return riscv_of_parent_hartid(parent.np, hartid);
}
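
/*
 * For reference, a hedged devicetree sketch (node names are illustrative):
 * the IMSIC's parent interrupts are the per-hart external interrupts for
 * the current privilege level (RV_IRQ_EXT), typically wired up as:
 *
 *	interrupts-extended = <&cpu0_intc 9>, <&cpu1_intc 9>;
 *
 * so index i above maps parent irq i back to the hart behind cpuN_intc.
 */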

static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode,
					  u32 index, struct resource *res)
{
	/*
	 * Currently, only an OF fwnode is supported; extend this
	 * function to add ACPI support.
	 */
	if (!is_of_node(fwnode))
		return -EINVAL;

	return of_address_to_resource(to_of_node(fwnode), index, res);
}

static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
				     struct imsic_global_config *global,
				     u32 *nr_parent_irqs,
				     u32 *nr_mmios)
{
	unsigned long hartid;
	struct resource res;
	int rc;
	u32 i;

	/*
	 * Currently, only an OF fwnode is supported; extend this
	 * function to add ACPI support.
	 */
	if (!is_of_node(fwnode))
		return -EINVAL;

	*nr_parent_irqs = 0;
	*nr_mmios = 0;

	/* Find number of parent interrupts */
	while (!imsic_get_parent_hartid(fwnode, *nr_parent_irqs, &hartid))
		(*nr_parent_irqs)++;
	if (!*nr_parent_irqs) {
		pr_err("%pfwP: no parent irqs available\n", fwnode);
		return -EINVAL;
	}

	/* Find number of guest index bits in MSI address */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits",
				  &global->guest_index_bits);
	if (rc)
		global->guest_index_bits = 0;

	/* Find number of HART index bits */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits",
				  &global->hart_index_bits);
	if (rc) {
		/* Assume default value */
		global->hart_index_bits = __fls(*nr_parent_irqs);
		if (BIT(global->hart_index_bits) < *nr_parent_irqs)
			global->hart_index_bits++;
	}

	/* Find number of group index bits */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits",
				  &global->group_index_bits);
	if (rc)
		global->group_index_bits = 0;

	/*
	 * Find first bit position of group index.
	 * If not specified, assume the default APLIC-IMSIC configuration.
	 */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift",
				  &global->group_index_shift);
	if (rc)
		global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2;

	/* Find number of interrupt identities */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids",
				  &global->nr_ids);
	if (rc) {
		pr_err("%pfwP: number of interrupt identities not found\n", fwnode);
		return rc;
	}

	/* Find number of guest interrupt identities */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids",
				  &global->nr_guest_ids);
	if (rc)
		global->nr_guest_ids = global->nr_ids;

	/* Sanity check guest index bits */
	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT;
	if (i < global->guest_index_bits) {
		pr_err("%pfwP: guest index bits too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check HART index bits */
	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits;
	if (i < global->hart_index_bits) {
		pr_err("%pfwP: HART index bits too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check group index bits */
	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT -
	    global->guest_index_bits - global->hart_index_bits;
	if (i < global->group_index_bits) {
		pr_err("%pfwP: group index bits too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check group index shift */
	i = global->group_index_bits + global->group_index_shift - 1;
	if (i >= BITS_PER_LONG) {
		pr_err("%pfwP: group index shift too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check number of interrupt identities */
	if (global->nr_ids < IMSIC_MIN_ID ||
	    global->nr_ids >= IMSIC_MAX_ID ||
	    (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
		pr_err("%pfwP: invalid number of interrupt identities\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check number of guest interrupt identities */
	if (global->nr_guest_ids < IMSIC_MIN_ID ||
	    global->nr_guest_ids >= IMSIC_MAX_ID ||
	    (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
		pr_err("%pfwP: invalid number of guest interrupt identities\n", fwnode);
		return -EINVAL;
	}

	/* Compute base address */
	rc = imsic_get_mmio_resource(fwnode, 0, &res);
	if (rc) {
		pr_err("%pfwP: first MMIO resource not found\n", fwnode);
		return -EINVAL;
	}
	global->base_addr = res.start;
	global->base_addr &= ~(BIT(global->guest_index_bits +
				   global->hart_index_bits +
				   IMSIC_MMIO_PAGE_SHIFT) - 1);
	global->base_addr &= ~((BIT(global->group_index_bits) - 1) <<
			       global->group_index_shift);

	/* Find number of MMIO register sets */
	while (!imsic_get_mmio_resource(fwnode, *nr_mmios, &res))
		(*nr_mmios)++;

	return 0;
}
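
/*
 * Address layout recap (per the RISC-V AIA specification): the MSI page of
 * a given group/hart/guest lives at
 *
 *	base_addr + (group << group_index_shift) +
 *	(hart * BIT(guest_index_bits) + guest) * IMSIC_MMIO_PAGE_SZ
 *
 * which is why the masking above strips the guest, hart and group index
 * fields from the first MMIO resource to recover base_addr.
 */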

int __init imsic_setup_state(struct fwnode_handle *fwnode)
{
	u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0;
	struct imsic_global_config *global;
	struct imsic_local_config *local;
	void __iomem **mmios_va = NULL;
	struct resource *mmios = NULL;
	unsigned long reloff, hartid;
	phys_addr_t base_addr;
	int rc, cpu;

	/*
	 * Only one IMSIC instance is allowed in a platform for a clean
	 * implementation of SMP IRQ affinity and per-CPU IPIs.
	 *
	 * This means on a multi-socket (or multi-die) platform we will
	 * have multiple MMIO regions for one IMSIC instance.
	 */
	if (imsic) {
		pr_err("%pfwP: already initialized hence ignoring\n", fwnode);
		return -EALREADY;
	}

	if (!riscv_isa_extension_available(NULL, SxAIA)) {
		pr_err("%pfwP: AIA support not available\n", fwnode);
		return -ENODEV;
	}

	imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
	if (!imsic)
		return -ENOMEM;
	imsic->fwnode = fwnode;
	global = &imsic->global;

	global->local = alloc_percpu(typeof(*global->local));
	if (!global->local) {
		rc = -ENOMEM;
		goto out_free_priv;
	}

	/* Parse IMSIC fwnode */
	rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios);
	if (rc)
		goto out_free_local;

	/* Allocate MMIO resource array */
	mmios = kcalloc(nr_mmios, sizeof(*mmios), GFP_KERNEL);
	if (!mmios) {
		rc = -ENOMEM;
		goto out_free_local;
	}

	/* Allocate MMIO virtual address array */
	mmios_va = kcalloc(nr_mmios, sizeof(*mmios_va), GFP_KERNEL);
	if (!mmios_va) {
		rc = -ENOMEM;
		goto out_iounmap;
	}

	/* Parse and map MMIO register sets */
	for (i = 0; i < nr_mmios; i++) {
		rc = imsic_get_mmio_resource(fwnode, i, &mmios[i]);
		if (rc) {
			pr_err("%pfwP: unable to parse MMIO regset %d\n", fwnode, i);
			goto out_iounmap;
		}

		base_addr = mmios[i].start;
		base_addr &= ~(BIT(global->guest_index_bits +
				   global->hart_index_bits +
				   IMSIC_MMIO_PAGE_SHIFT) - 1);
		base_addr &= ~((BIT(global->group_index_bits) - 1) <<
			       global->group_index_shift);
		if (base_addr != global->base_addr) {
			rc = -EINVAL;
			pr_err("%pfwP: address mismatch for regset %d\n", fwnode, i);
			goto out_iounmap;
		}

		mmios_va[i] = ioremap(mmios[i].start, resource_size(&mmios[i]));
		if (!mmios_va[i]) {
			rc = -EIO;
			pr_err("%pfwP: unable to map MMIO regset %d\n", fwnode, i);
			goto out_iounmap;
		}
	}

	/* Initialize local (or per-CPU) state */
	rc = imsic_local_init();
	if (rc) {
		pr_err("%pfwP: failed to initialize local state\n",
		       fwnode);
		goto out_iounmap;
	}

	/* Configure handlers for target CPUs */
	for (i = 0; i < nr_parent_irqs; i++) {
		rc = imsic_get_parent_hartid(fwnode, i, &hartid);
		if (rc) {
			pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("%pfwP: invalid cpuid for parent irq%d\n", fwnode, i);
			continue;
		}

		/* Find MMIO location of MSI page */
		index = nr_mmios;
		reloff = i * BIT(global->guest_index_bits) *
			 IMSIC_MMIO_PAGE_SZ;
		for (j = 0; j < nr_mmios; j++) {
			if (reloff < resource_size(&mmios[j])) {
				index = j;
				break;
			}

			/*
			 * MMIO region size may not be aligned to
			 * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ
			 * if holes are present.
			 */
			reloff -= ALIGN(resource_size(&mmios[j]),
					BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ);
		}
		if (index >= nr_mmios) {
			pr_warn("%pfwP: MMIO not found for parent irq%d\n", fwnode, i);
			continue;
		}

		local = per_cpu_ptr(global->local, cpu);
		local->msi_pa = mmios[index].start + reloff;
		local->msi_va = mmios_va[index] + reloff;

		nr_handlers++;
	}
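
	/*
	 * Worked example (illustrative only): with guest_index_bits = 2 and
	 * 4KiB MSI pages, parent irq 3 starts at reloff = 3 * 4 * 0x1000 =
	 * 0xc000. If regset 0 is only 0x8000 bytes, reloff is rebased by
	 * its aligned size (0x8000) and the search continues in regset 1
	 * with reloff = 0x4000.
	 */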

	/* If no CPU handlers were found then we can't take interrupts */
	if (!nr_handlers) {
		pr_err("%pfwP: No CPU handlers found\n", fwnode);
		rc = -ENODEV;
		goto out_local_cleanup;
	}

	/* Initialize matrix allocator */
	rc = imsic_matrix_init();
	if (rc) {
		pr_err("%pfwP: failed to create matrix allocator\n", fwnode);
		goto out_local_cleanup;
	}

	/* We don't need the MMIO arrays anymore so free them up */
	kfree(mmios_va);
	kfree(mmios);

	return 0;

out_local_cleanup:
	imsic_local_cleanup();
out_iounmap:
	/* mmios_va may be NULL if its allocation itself failed */
	for (i = 0; mmios_va && i < nr_mmios; i++) {
		if (mmios_va[i])
			iounmap(mmios_va[i]);
	}
	kfree(mmios_va);
	kfree(mmios);
out_free_local:
	free_percpu(imsic->global.local);
out_free_priv:
	kfree(imsic);
	imsic = NULL;
	return rc;
}