/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

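/*
 * Compute the control register addresses for a given PMB entry. Each
 * entry has a slot in both the address array (PMB_ADDR) and the data
 * array (PMB_DATA), selected by shifting the entry number into place.
 */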
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

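/*
 * Two entries are mergeable when the second picks up exactly where the
 * first leaves off, both virtually and physically, with identical flags.
 */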
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

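/*
 * Check whether the requested range is already covered, either by a
 * single entry or by a chain of linked entries forming a compound
 * mapping.
 */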
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

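/*
 * Validation helpers: the mapping size must be one of the supported
 * PMB page sizes, the virtual range must lie within P1SEG/P2SEG, and
 * user-accessible protections are not permitted.
 */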
static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

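/*
 * Grab the first free slot in the entry map, or -ENOSPC if every
 * hardware entry is in use. Caller must hold pmb_rwlock for writing.
 */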
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

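/*
 * Allocate and initialize a software entry, either at the requested
 * slot or, with PMB_NO_ENTRY, at the first free one. Returns an
 * ERR_PTR() value on failure.
 */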
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

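/*
 * Return an entry's slot to the pool and break any chain link. Caller
 * must hold pmb_rwlock for writing.
 */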
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

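/*
 * Invalidate a hardware entry by clearing the valid bit in both the
 * address and data arrays.
 */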
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

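/*
 * Establish a bolted mapping of [vaddr, vaddr + size) to phys, always
 * using the largest page size that still fits so that as few entries
 * as possible are consumed. Entries making up the mapping are linked
 * together so they can later be torn down or coalesced as a unit.
 */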
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				spin_lock(&pmbp->lock);
				pmbp->link = pmbe;
				spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

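/*
 * Dynamically remap a physical range through the PMB. This is only
 * attempted when "pmb=iomap" is given on the kernel command line; the
 * range is rounded out to PMB page boundaries and the original offset
 * is folded back into the returned virtual address.
 */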
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

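/*
 * Tear down the mapping that starts at the given virtual address,
 * following any linked entries that form a compound mapping.
 */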
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

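/*
 * Walk the entry chain, invalidating the hardware entries and freeing
 * the software state for up to 'depth' linked entries.
 */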
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

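/*
 * Fold as much of the chain starting at 'head' as will collapse into a
 * single entry of a supported page size, then tear down the entries
 * that were absorbed.
 */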
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

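/*
 * Scan for compound mappings established at boot and attempt to
 * coalesce each one into a single larger entry.
 */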
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
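/*
 * Shrink the uncached mapping inherited from the boot loader down to a
 * single 16MB entry and update the uncached section to match.
 */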
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

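/*
 * Early PMB initialization: import the boot mappings into the software
 * state, coalesce and resize them where possible, then flush the TLB
 * so the final translations take effect.
 */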
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

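/* debugfs dump of the hardware PMB state, one line per entry. */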
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
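/*
 * On resume from hibernation the hardware PMB is stale, so re-load
 * every entry from the software copies.
 */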
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif