/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

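/* Boot with "nopat" on the kernel command line to disable PAT at run time. */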
static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
        (void)reason;
}
#endif


int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
        pat_debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

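/*
 * PAT(x, y) places memory type PAT_y into byte x of the PAT MSR image,
 * e.g. PAT(2, UC_MINUS) == (u64)PAT_UC_MINUS << 16 == 0x00070000,
 * which programs PAT entry 2 as UC-.
 */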
#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;
        bool boot_cpu = !boot_pat_state;

        if (!pat_enabled)
                return;

        if (!cpu_has_pat) {
                if (!boot_pat_state) {
                        pat_disable("PAT not supported by CPU.");
                        return;
                } else {
                        /*
                         * If this happens we are on a secondary CPU, but
                         * switched to PAT on the boot CPU. We have no way to
                         * undo PAT.
                         */
                        printk(KERN_ERR "PAT enabled, "
                               "but not supported by secondary CPU\n");
                        BUG();
                }
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
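        /*
         * With the PAT_* values above this evaluates to 0x0007010600070106,
         * i.e. entries 0-3 (and their aliases 4-7) are WB, WC, UC- and UC.
         */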

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);

        if (boot_cpu)
                printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
                       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype accesses */
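/*
 * All rbt_memtype_*() calls below (the rbtree of non-RAM reservations) are
 * made with memtype_lock held; RAM pages are tracked via page flags instead.
 */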

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting effective memory type as PAT understands it.
 * (PAT and MTRR do not use the same encoding for a given type.)
 * The intersection is based on the "Effective Memory Type" tables in
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for the MTRR hint to get the effective type in case the PAT
         * request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_UC_MINUS;

                return _PAGE_CACHE_WB;
        }

        return req_type;
}

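/*
 * Returns 1 if the whole range is RAM, 0 if the whole range is non-RAM
 * (the low ISA region always counts as non-RAM here), and -1 if the range
 * mixes RAM and non-RAM pages.
 */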
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                /*
                 * For legacy reasons, physical address ranges in the legacy
                 * ISA region are tracked as non-RAM. This allows users of
                 * /dev/mem to map portions of the legacy ISA region, even
                 * when some of those portions are listed (or not even listed)
                 * with different e820 types (RAM/reserved/...).
                 */
                if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
                    page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for the pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn;

        if (req_type == _PAGE_CACHE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
                req_type = _PAGE_CACHE_UC_MINUS;
        }

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                unsigned long type;

                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
                        printk(KERN_INFO "reserve_ram_pages_type failed "
                                "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
                                start, end, type, req_type);
                        if (new_type)
                                *new_type = type;

                        return -EBUSY;
                }
        }

        if (new_type)
                *new_type = req_type;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, req_type);
        }
        return 0;
}

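/*
 * Clears the page-flag based memtype for each page in the range, returning
 * the pages to their untracked default (treated as WB by lookup_memtype()).
 */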
static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, -1);
        }
        return 0;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function returns an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, the function returns the
 * available type in new_type when there is no error. On any error it returns
 * a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new;
        unsigned long actual_type;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == _PAGE_CACHE_WC)
                                *new_type = _PAGE_CACHE_UC_MINUS;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        /*
         * Call mtrr_lookup to get the type hint. This is an
         * optimization for /dev/mem mmap'ers into WB memory (BIOS
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
        actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

        if (new_type)
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                err = reserve_ram_pages_type(start, end, req_type, new_type);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start      = start;
        new->end        = end;
        new->type       = actual_type;

        spin_lock(&memtype_lock);

        err = rbt_memtype_check_insert(new, new_type);
        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}

int free_memtype(u64 start, u64 end)
{
        int err = -EINVAL;
        int is_range_ram;
        struct memtype *entry;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end))
                return 0;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                err = free_ram_pages_type(start, end);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        spin_lock(&memtype_lock);
        entry = rbt_memtype_erase(start, end);
        spin_unlock(&memtype_lock);

        if (!entry) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                        current->comm, current->pid, start, end);
                return -EINVAL;
        }

        kfree(entry);

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

        return 0;
}
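
/*
 * Illustrative only (not code from this file): a typical non-RAM caller
 * pairs the two functions above roughly as
 *
 *      unsigned long type;
 *
 *      if (reserve_memtype(phys, phys + size, _PAGE_CACHE_UC_MINUS, &type))
 *              return -EBUSY;
 *      ... set up and use the mapping with the returned "type" ...
 *      free_memtype(phys, phys + size);
 *
 * In-kernel users normally go through wrappers such as io_reserve_memtype()
 * below rather than calling reserve_memtype() directly.
 */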


/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
        int rettype = _PAGE_CACHE_WB;
        struct memtype *entry;

        if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
                return rettype;

        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;
                page = pfn_to_page(paddr >> PAGE_SHIFT);
                rettype = get_page_memtype(page);
                /*
                 * -1 from get_page_memtype() implies the RAM page is in its
                 * default state and not reserved, and hence of type WB
                 */
                if (rettype == -1)
                        rettype = _PAGE_CACHE_WB;

                return rettype;
        }

        spin_lock(&memtype_lock);

        entry = rbt_memtype_lookup(paddr);
        if (entry != NULL)
                rettype = entry->type;
        else
                rettype = _PAGE_CACHE_UC_MINUS;

        spin_unlock(&memtype_lock);
        return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with the requested type. On success, the
 * requested type, or any other compatible type that was available for the
 * region, is returned here.
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
                        unsigned long *type)
{
        resource_size_t size = end - start;
        unsigned long req_type = *type;
        unsigned long new_type;
        int ret;

        WARN_ON_ONCE(iomem_map_sanity_check(start, size));

        ret = reserve_memtype(start, end, req_type, &new_type);
        if (ret)
                goto out_err;

        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;

        if (kernel_map_sync_memtype(start, size, new_type) < 0)
                goto out_free;

        *type = new_type;
        return 0;

out_free:
        free_memtype(start, end);
        ret = -EBUSY;
out_err:
        return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
        free_memtype(start, end);
}

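/*
 * Default protection for /dev/mem mmap: leave the caller's protection
 * unchanged; PAT-specific adjustments happen in
 * phys_mem_access_prot_allowed() below.
 */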
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled)
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

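/*
 * Called for /dev/mem mmap: returns 0 if the access is not allowed, or 1
 * with *vma_prot updated to carry the cache type chosen for the mapping.
 */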
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
{
        unsigned long flags = _PAGE_CACHE_WB;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_DSYNC)
                flags = _PAGE_CACHE_UC_MINUS;

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
        unsigned long id_sz;

        if (base >= __pa(high_memory))
                return 0;

        id_sz = (__pa(high_memory) < base + size) ?
                                __pa(high_memory) - base :
                                size;

        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
                printk(KERN_INFO
                        "%s:%d ioremap_change_attr failed %s "
                        "for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        base, (unsigned long long)(base + size));
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(), it
 * also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                int strict_prot)
{
        int is_ram = 0;
        int ret;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
        unsigned long flags = want_flags;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);

        /*
         * reserve_pfn_range() for RAM pages. We do not refcount to keep
         * track of the number of mappings of RAM pages. We can assert that
         * the type requested matches the type of the first page in the range.
         */
        if (is_ram) {
                if (!pat_enabled)
                        return 0;

                flags = lookup_memtype(paddr);
                if (want_flags != flags) {
                        printk(KERN_WARNING
                        "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                                cattr_name(flags));
                        *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                              (~_PAGE_CACHE_MASK)) |
                                             flags);
                }
                return 0;
        }

        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
        if (ret)
                return ret;

        if (flags != want_flags) {
                if (strict_prot ||
                    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                                cattr_name(flags));
                        return -EINVAL;
                }
                /*
                 * We allow returning a different type than the one requested
                 * in the non-strict case.
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
                                     flags);
        }

        if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
        int is_ram;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_size = vma->vm_end - vma->vm_start;
        pgprot_t pgprot;

        if (is_linear_pfn_mapping(vma)) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

        return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                        unsigned long pfn, unsigned long size)
{
        unsigned long flags;
        resource_size_t paddr;
        unsigned long vma_size = vma->vm_end - vma->vm_start;

        if (is_linear_pfn_mapping(vma)) {
                /* reserve the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                return reserve_pfn_range(paddr, vma_size, prot, 0);
        }

        if (!pat_enabled)
                return 0;

        /* for vm_insert_pfn and friends, we set prot based on lookup */
        flags = lookup_memtype(pfn << PAGE_SHIFT);
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
                         flags);

        return 0;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
                        unsigned long size)
{
        resource_size_t paddr;
        unsigned long vma_size = vma->vm_end - vma->vm_start;

        if (is_linear_pfn_mapping(vma)) {
                /* free the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                free_pfn_range(paddr, vma_size);
                return;
        }
}

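/*
 * Returns a write-combining protection when PAT is enabled; without PAT
 * there is no WC page attribute, so fall back to uncached.
 */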
pgprot_t pgprot_writecombine(pgprot_t prot)
{
        if (pat_enabled)
                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
        else
                return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

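/*
 * Copies the pos-th entry of the memtype rbtree (under memtype_lock) into a
 * freshly allocated struct memtype for the seq_file below; the caller must
 * kfree() the result. Returns NULL when pos is past the last entry.
 */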
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *print_entry;
        int ret;

        print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        ret = rbt_memtype_copy_nth_element(print_entry, pos);
        spin_unlock(&memtype_lock);

        if (!ret) {
                return print_entry;
        } else {
                kfree(print_entry);
                return NULL;
        }
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                        print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static const struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        if (pat_enabled) {
                debugfs_create_file("pat_memtype_list", S_IRUSR,
                                    arch_debugfs_dir, NULL, &memtype_fops);
        }
        return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */