/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/blktrace_api.h>
#include <asm/tlbflush.h>

/*
 * Virtual_count is not a pure "count".
 * 0 means that it is not mapped, and has not been mapped
 *   since a TLB flush - it is usable.
 * 1 means that there are no users, but it has been mapped
 *   since the last TLB flush - so we can't use it.
 * n means that there are (n-1) current users of it.
 */

#ifdef CONFIG_HIGHMEM

unsigned long totalhigh_pages __read_mostly;

unsigned int nr_free_highpages (void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_online_pgdat(pgdat)
		pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
					 NR_FREE_PAGES);

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t * pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

static void flush_all_zero_pkmaps(void)
{
	int i;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, (unsigned long)page_address(page),
			  &pkmap_page_table[i]);

		set_page_address(page, NULL);
	}
	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
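/*
 * Illustrative sketch (not part of the original code): the pkmap_count
 * state machine documented above, seen from a single pkmap slot and
 * assuming the page is mapped exactly once:
 *
 *	kmap_high(page)          -> pkmap_count[nr] == 2  (one user)
 *	kunmap_high(page)        -> pkmap_count[nr] == 1  (no users, but the
 *	                            old translation may still sit in a TLB)
 *	flush_all_zero_pkmaps()  -> pkmap_count[nr] == 0  (PTE cleared, TLBs
 *	                            flushed, slot reusable)
 *
 * A slot is only handed out again from state 0, which is what prevents
 * a stale TLB entry from ever aliasing a new mapping.
 */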
/* Flush all unused kmap mappings in order to remove stray
   mappings. */
void kmap_flush_unused(void)
{
	spin_lock(&kmap_lock);
	flush_all_zero_pkmaps();
	spin_unlock(&kmap_lock);
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;

start:
	count = LAST_PKMAP;
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			flush_all_zero_pkmaps();
			count = LAST_PKMAP;
		}
		if (!pkmap_count[last_pkmap_nr]) {
			if (cpu_has_dc_aliases) {
				unsigned int pfn, map_pfn;

				/* check page color */
				pfn = page_to_pfn(page);
				map_pfn = PKMAP_ADDR(last_pkmap_nr) >> PAGE_SHIFT;

				/* Avoid the possibility of cache aliasing */
				if (!pages_do_alias((map_pfn << PAGE_SHIFT),
						    (pfn << PAGE_SHIFT)))
					break;	/* Found a usable entry */
			} else
				break;	/* Found a usable entry */
		}
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&pkmap_map_wait, &wait);
			spin_unlock(&kmap_lock);
			schedule();
			remove_wait_queue(&pkmap_map_wait, &wait);
			spin_lock(&kmap_lock);

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

void fastcall *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 *
	 * We cannot call this from interrupts, as it may block
	 */
	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	spin_unlock(&kmap_lock);
	return (void*) vaddr;
}

EXPORT_SYMBOL(kmap_high);
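/*
 * Illustrative usage sketch (not part of the original code): callers
 * normally reach kmap_high()/kunmap_high() through the arch kmap() and
 * kunmap() wrappers, which use the direct mapping for lowmem pages and
 * only fall back to the pkmap machinery for PageHighMem pages.  A
 * typical pattern for touching a possibly-highmem page, from a context
 * that is allowed to sleep, looks roughly like:
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 * kmap() may block waiting for a free pkmap slot, so it must never be
 * called from interrupt context; kmap_atomic() is the non-sleeping
 * alternative, handled in arch code rather than here.
 */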
void fastcall kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	int need_wakeup;

	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		need_wakeup = waitqueue_active(&pkmap_map_wait);
	}
	spin_unlock(&kmap_lock);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif	/* CONFIG_HIGHMEM */

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

/*
 * page_address_map freelist, allocated from page_address_maps.
 */
static struct list_head page_address_pool;	/* freelist */
static spinlock_t pool_lock;			/* protects page_address_pool */

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

void *page_address(struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		BUG_ON(list_empty(&page_address_pool));

		spin_lock_irqsave(&pool_lock, flags);
		pam = list_entry(page_address_pool.next,
				 struct page_address_map, list);
		list_del(&pam->list);
		spin_unlock_irqrestore(&pool_lock, flags);

		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				spin_lock_irqsave(&pool_lock, flags);
				list_add_tail(&pam->list, &page_address_pool);
				spin_unlock_irqrestore(&pool_lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

static struct page_address_map page_address_maps[LAST_PKMAP];

void __init page_address_init(void)
{
	int i;

	INIT_LIST_HEAD(&page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
		list_add(&page_address_maps[i].list, &page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
	spin_lock_init(&pool_lock);
}

#endif	/* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
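/*
 * Illustrative sketch (not part of the original code): how a
 * page_address() lookup resolves under HASHED_PAGE_VIRTUAL.  Lowmem
 * pages are answered straight from the kernel direct mapping; highmem
 * pages are looked up in one of the 1 << PA_HASH_ORDER (128) buckets,
 * keyed by the struct page pointer:
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *	void *vaddr = page_address(page);	NULL here means the highmem
 *						page is not currently kmapped
 *
 * The freelist is sized at LAST_PKMAP entries, one per pkmap slot, so
 * set_page_address() can rely on the pool never being empty when a new
 * page->virtual association is added (hence the BUG_ON above).
 */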