/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>

static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pmd_t *pmd;

	if (!pgd)
		return NULL;
	pmd = pmd_offset(pgd, address);
	if (!pmd)
		return NULL;
	if ((pmd_val(*pmd) & PAGE_LARGE) == PAGE_LARGE)
		return (pte_t *)pmd;

	return pte_offset(pmd, address);
}

static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = mk_pte_phys(addr,
				       addr == address ? prot : PAGE_KERNEL);
	}
	return base;
}

static void flush_kernel_map(void *address)
{
	struct cpuinfo_x86 *cpu = &cpu_data[smp_processor_id()];

	wmb();
	/* Disabled for now because there seem to be some problems with CLFLUSH */
	if (0 && test_bit(X86_FEATURE_CLFLSH, &cpu->x86_capability)) {
		/* is this worth it? */
		int i;
		/* CLFLUSH takes a linear address, not a physical one. */
		for (i = 0; i < PAGE_SIZE; i += cpu->x86_clflush_size)
			asm volatile("clflush (%0)"
				     :: "r" ((char *)address + i) : "memory");
	} else
		asm volatile("wbinvd" ::: "memory");
	__flush_tlb_one(address);
}

/* No more special protections in this 2/4MB area - revert to a
   large page again. */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	if (!pgd)
		BUG();
	pmd = pmd_offset(pgd, address);
	if (!pmd)
		BUG();
	if ((pmd_val(*pmd) & _PAGE_GLOBAL) == 0)
		BUG();

	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK,
				PAGE_KERNEL_LARGE);
	set_pte((pte_t *)pmd, large_pte);
}
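/*
 * Note on the accounting used below (summarizing the code, not adding
 * behaviour): the count of the page that holds a split page table
 * (kpte_page) doubles as a counter of PTEs in the 2/4MB region that
 * deviate from PAGE_KERNEL.  It is raised whenever a previously standard
 * PTE is given special protections and lowered when one is reverted.
 * Once it falls back to its base value of 1 no special PTEs remain, the
 * large page is reinstated by revert_page() and the now-unused page
 * table page is handed back to the caller for freeing.
 */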
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 * When the page is in highmem it must never be kmap'ed.
 */
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot,
		   struct page **oldpage)
{
	pte_t *kpte;
	struct page *kpte_page;

	kpte = lookup_address(address);
	if (!kpte)
		return 0; /* not mapped in the kernel */
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			pte_t old = *kpte;
			pte_t standard = mk_pte(page, PAGE_KERNEL);

			set_pte(kpte, mk_pte(page, prot));
			if (pte_same(old, standard))
				atomic_inc(&kpte_page->count);
		} else {
			struct page *split = split_large_page(address, prot);
			if (!split)
				return -ENOMEM;
			/* Account for the special PTE installed by
			   split_large_page() and do all further counting
			   against the new page table page. */
			atomic_inc(&split->count);
			set_pte(kpte, mk_pte(split, PAGE_KERNEL));
			kpte_page = split;
		}
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		pte_t old = *kpte;

		set_pte(kpte, mk_pte(page, PAGE_KERNEL));
		/* Only drop the count if the PTE really was special before. */
		if (!pte_same(old, mk_pte(page, PAGE_KERNEL)))
			atomic_dec(&kpte_page->count);
	}

	if (atomic_read(&kpte_page->count) == 1) {
		*oldpage = kpte_page;
		revert_page(kpte_page, address);
	}
	return 0;
}

static inline void flush_and_free(void *address, struct page *fpage)
{
#ifdef CONFIG_SMP
	smp_call_function(flush_kernel_map, address, 1, 1);
#endif
	flush_kernel_map(address);
	if (fpage)
		__free_page(fpage);
}

int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	struct page *fpage, *fpage2;
	int i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, page++) {
		fpage = fpage2 = NULL;
		err = __change_page_attr((unsigned long)page_address(page),
					 page, prot, &fpage);

		/* Handle the kernel mapping too, which aliases part of lowmem */
		if (!err && page_to_phys(page) < KERNEL_TEXT_SIZE) {
			err = __change_page_attr((unsigned long)__START_KERNEL_map +
						 page_to_phys(page),
						 page, prot, &fpage2);
		}

		if (err)
			break;

		if (fpage || fpage2 || i == numpages - 1) {
			flush_and_free(page_address(page), fpage);
			if (unlikely(fpage2 != NULL))
				flush_and_free((char *)__START_KERNEL_map +
					       page_to_phys(page), fpage2);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}

EXPORT_SYMBOL(change_page_attr);
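
/*
 * Usage sketch (illustration only, not part of the original file): a
 * driver that lets a device access a page without cache coherency would
 * switch the kernel mapping to uncached while the device owns the page
 * and restore write-back before freeing it.  This is a minimal sketch,
 * assuming PAGE_KERNEL_NOCACHE from <asm/pgtable.h> and a hypothetical
 * caller, so it is compiled out.
 */
#if 0
static int example_map_uncached(void)
{
	struct page *p = alloc_pages(GFP_KERNEL, 0);
	int err;

	if (!p)
		return -ENOMEM;

	/* Make the kernel linear mapping (and the kernel text alias,
	   if the page falls inside it) uncacheable. */
	err = change_page_attr(p, 1, PAGE_KERNEL_NOCACHE);
	if (err) {
		__free_page(p);
		return err;
	}

	/* ... hand the page to the device and wait for it ... */

	/* Revert to write-back before freeing, so no stale uncached
	   mapping survives in the linear map. */
	change_page_attr(p, 1, PAGE_KERNEL);
	__free_page(p);
	return 0;
}
#endif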