#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (likely(mapping))
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
		   (__force unsigned long)mask;
}
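
/*
 * Illustrative sketch, not part of the original header: how an fsync-style
 * path might consume the sticky AS_EIO/AS_ENOSPC bits that writeback sets
 * via mapping_set_error().  The function name is hypothetical; the real
 * consumers of these bits live in mm/filemap.c.
 */
static inline int example_check_mapping_errors(struct address_space *mapping)
{
	int ret = 0;

	/* test-and-clear so each error is reported only once */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
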
/*
 * The page cache can be done in larger chunks than
 * one page, because that allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned.  Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable.  page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * The remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed and then the exact
 * same page is inserted into the same position in pagecache.  That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on the order in which locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not.  Likewise, the old find_get_page could
 * run either before the insertion or afterwards, depending on timing.
 * (A sketch of the lookup-side pattern follows page_cache_get_speculative()
 * below.)
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see the comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
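
/*
 * Illustrative sketch, not part of the original header: the lookup-side
 * protocol (steps 1-3 above) spelled out, roughly what find_get_page() in
 * mm/filemap.c does.  The function name is hypothetical, and the real code
 * uses radix_tree_lookup_slot() with extra retry logic.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
repeat:
	/* 1. find page in radix tree */
	page = radix_tree_lookup(&mapping->page_tree, index);
	if (page) {
		/* 2. conditionally increment refcount */
		if (!page_cache_get_speculative(page))
			goto repeat;
		/* 3. check the page is still in pagecache (if not, goto 1) */
		if (page != radix_tree_lookup(&mapping->page_tree, index)) {
			/* drop the invalid speculative reference and retry */
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}
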
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				  pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				   pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages,
			    struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
					 pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
					   pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
					  pgoff_t index, filler_t *filler,
					  void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler,
				    void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler,
			    void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
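
/*
 * Illustrative sketch, not part of the original header: reading one page of
 * a file through the page cache with read_mapping_page() and copying its
 * contents out.  Hypothetical function name; assumes "buf" holds at least
 * PAGE_CACHE_SIZE bytes and that memcpy() (linux/string.h) is available.
 */
static inline int example_read_one_page(struct address_space *mapping,
					pgoff_t index, void *buf)
{
	struct page *page;
	void *kaddr;

	/* NULL "data" is passed through to the ->readpage filler */
	page = read_mapping_page(mapping, index, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);		/* I/O error or OOM */

	kaddr = kmap(page);			/* page is uptodate here */
	memcpy(buf, kaddr, PAGE_CACHE_SIZE);
	kunmap(page);

	page_cache_release(page);	/* drop read_mapping_page's ref */
	return 0;
}
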
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e., with an elevated "page->_count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
		    ((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
		    ((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
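
/*
 * Illustrative sketch, not part of the original header: the common pattern
 * of taking the page lock killably and then revalidating that the page
 * still belongs to the mapping, since truncate may have removed it while
 * we slept.  The function name is hypothetical.
 */
static inline int example_lock_and_check(struct page *page,
					 struct address_space *mapping)
{
	if (lock_page_killable(page))
		return -EINTR;	/* a fatal signal arrived while waiting */

	/* Truncate may have raced with us while we waited for the lock */
	if (page->mapping != mapping) {
		unlock_page(page);
		return -EAGAIN;		/* caller should redo the lookup */
	}
	return 0;			/* locked and still in the mapping */
}
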
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

#endif /* _LINUX_PAGEMAP_H */