/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * Walk the kernel page tables and return the pte mapping @address,
 * or NULL if it is not mapped. For a 2MB large page the pmd entry
 * itself is returned, cast to a pte_t *.
 */
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}
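
/*
 * Usage sketch (hypothetical, not part of this file): since a 2MB
 * mapping is returned as the pmd entry cast to pte_t *, a caller can
 * test the mapping level with pte_huge():
 *
 *	pte_t *pte = lookup_address(address);
 *	if (pte && pte_huge(*pte))
 *		handle_large_page();	(handle_large_page is hypothetical)
 */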

static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}
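
/*
 * Illustration (hypothetical values, assuming 2MB large pages): after
 * splitting the large page that covers physical 0x200000 on behalf of
 * the 4k page at physical 0x201000, the new page table contains
 *
 *	pbase[0]   -> 0x200000 with ref_prot
 *	pbase[1]   -> 0x201000 with prot       (the target page)
 *	...
 *	pbase[511] -> 0x3ff000 with ref_prot
 *
 * page_private(base) starts at 0; __change_page_attr() increments it
 * for every pte in this table that carries non-standard attributes.
 */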

static void cache_flush_page(void *adr)
{
	int i;

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		asm volatile("clflush (%0)" :: "r" (adr + i));
}

static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/*
	 * When clflush is available, always use it because it is much
	 * cheaper than WBINVD. clflush is disabled for now because the
	 * higher-level code is not ready yet.
	 */
	if (1 || !cpu_has_clflush)
		asm volatile("wbinvd" ::: "memory");
	else list_for_each_entry(pg, l, lru) {
		void *adr = page_address(pg);
		if (cpu_has_clflush)
			cache_flush_page(adr);
	}
	__flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
	list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area, so revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;
	unsigned long pfn;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
	large_pte = pfn_pte(pfn, ref_prot);
	large_pte = pte_mkhuge(large_pte);
	set_pte((pte_t *)pmd, large_pte);
}
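
/*
 * Sketch of the transition performed here (hypothetical values, for
 * the large page area at physical 0x200000):
 *
 *	before:	pmd -> page table page -> 512 individual 4k ptes
 *	after:	pmd = pfn_pte(0x200, ref_prot) with _PAGE_PSE set
 *
 * The now-unused page table page is not freed here: the caller queues
 * it on deferred_pages via save_page(), and global_flush_tlb() frees
 * it only after every CPU has flushed its TLB (and caches).
 */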

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	pgprot_t ref_prot2;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;

			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if (!pte_huge(*kpte)) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/* On x86-64 the direct mapping set up at boot does not use 4k pages. */
	BUG_ON(PageReserved(kpte_page));

	if (page_private(kpte_page) == 0) {
		save_page(kpte_page);
		revert_page(address, ref_prot);
	}
	return 0;
}
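
/*
 * Worked example of the page_private accounting above (hypothetical
 * sequence for one 4k page inside a formerly large mapping):
 *
 *	change to PAGE_KERNEL_NOCACHE: the 2MB page is split and the
 *	    new page table page gets page_private = 1
 *	change back to PAGE_KERNEL: the pte is restored to ref_prot,
 *	    page_private drops to 0, the page table page is queued on
 *	    deferred_pages and the pmd reverts to a large page
 */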

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped somewhere with a caching
 * policy other than write-back, since some CPUs do not tolerate aliased
 * mappings with conflicting caching policies. This changes the page
 * attributes of the kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	int i;

	if (address >= __START_KERNEL_map
	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
			if (err)
				break;
		}
		/*
		 * Handle the kernel text mapping too, which aliases part
		 * of lowmem.
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable. */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry. */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
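
/*
 * A minimal caller sketch (hypothetical driver code, assuming a page
 * that must temporarily be accessed uncached):
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *	... touch the page through the linear mapping ...
 *	change_page_attr(page, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 *
 * The global_flush_tlb() after each change is mandatory; see the
 * comment above change_page_attr_addr().
 */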

void global_flush_tlb(void)
{
	struct page *pg, *next;
	struct list_head l;

	down_read(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_read(&init_mm.mmap_sem);

	flush_map(&l);

	list_for_each_entry_safe(pg, next, &l, lru) {
		ClearPagePrivate(pg);
		__free_page(pg);
	}
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);