#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/rbtree.h>

extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern unsigned long num_mappedpages;
extern void * high_memory;
extern int page_cluster;
/* The inactive_clean lists are per zone. */
extern struct list_head active_list;
extern struct list_head inactive_list;

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>

/*
 * Linux kernel virtual memory manager primitives.
 * The idea is to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, listed below. */

	rb_node_t vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * one of the address_space->i_mmap{,shared} lists,
	 * for shm areas, the list of attaches, otherwise unused.
	 */
	struct vm_area_struct *vm_next_share;
	struct vm_area_struct **vm_pprev_share;

	/* Function pointers to deal with this struct. */
	struct vm_operations_struct * vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;		/* File we map to (can be NULL). */
	unsigned long vm_raend;		/* read-ahead window end, in pages */
	void * vm_private_data;		/* was vm_pte (shared mem) */
};
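
/*
 * Illustrative sketch, not part of the original header: walking a
 * process's VMA list through the sorted vm_next chain above. The
 * helper name example_print_vmas is hypothetical, and the caller is
 * assumed to hold mm->mmap_sem so the list cannot change underneath us.
 */
static inline void example_print_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		printk("vma %08lx-%08lx flags %08lx\n",
		       vma->vm_start, vma->vm_end, vma->vm_flags);
}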

/*
 * vm_flags..
 */
#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_GROWSUP	0x00000200
#define VM_SHM		0x00000400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000	/* Don't unmap it from swap_out */

#define VM_STACK_FLAGS	0x00000177

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/* read ahead limits */
extern int vm_min_readahead;
extern int vm_max_readahead;

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];


/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), and a
 * pointer to the function called when a no-page or a wp-page exception
 * occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
};
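
/*
 * Illustrative sketch, not part of the original header: the shape of a
 * minimal nopage handler a driver might hang off vm_ops. The names
 * example_nopage/example_vm_ops are hypothetical. A real handler would
 * look up (or allocate) the backing page and return it with a reference
 * held; returning NULL here means NOPAGE_SIGBUS (defined further down).
 */
static struct page *example_nopage(struct vm_area_struct *area,
				   unsigned long address, int unused)
{
	return NULL;	/* no page at this address: caller raises SIGBUS */
}

static struct vm_operations_struct example_vm_ops = {
	nopage:	example_nopage,
};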

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page.
 *
 * Try to keep the most commonly accessed fields in single cache lines
 * here (16 bytes or greater).  This ordering should be particularly
 * beneficial on 32-bit processors.
 *
 * The first line is data used in page cache lookup, the second line
 * is used for linear searches (eg. clock algorithm scans).
 *
 * TODO: make this structure smaller, it could be as small as 32 bytes.
 */
typedef struct page {
	struct list_head list;		/* ->mapping has some page lists. */
	struct address_space *mapping;	/* The inode (or ...) we belong to. */
	unsigned long index;		/* Our offset within mapping. */
	struct page *next_hash;		/* Next page sharing our hash bucket in
					   the pagecache hash table. */
	atomic_t count;			/* Usage count, see below. */
	unsigned long flags;		/* atomic flags, some possibly
					   updated asynchronously */
	struct list_head lru;		/* Pageout list, eg. active_list;
					   protected by pagemap_lru_lock !! */
	struct page **pprev_hash;	/* Complement to *next_hash. */
	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
} mem_map_t;

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - disk mapping    (page->buffers)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */
#define get_page(p)		atomic_inc(&(p)->count)
#define put_page(p)		__free_page(p)
#define put_page_testzero(p)	atomic_dec_and_test(&(p)->count)
#define page_count(p)		atomic_read(&(p)->count)
#define set_page_count(p,v)	atomic_set(&(p)->count, v)
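
/*
 * Illustrative sketch, not part of the original header: reading the
 * usage count with the macros above. For a page-cache page, one
 * reference belongs to the cache itself and one more to an attached
 * buffer ring, so anything beyond that means another user holds the
 * page. example_page_is_pinned is hypothetical; compare
 * is_page_cache_freeable() further down.
 */
static inline int example_page_is_pinned(struct page *page)
{
	return page_count(page) > 1 + !!page->buffers;
}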

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped
 * out. Some of them might not even exist (eg empty_bad_page)...
 *
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page->count denotes a reference count.
 *   page->count == 0 means the page is free.
 *   page->count == 1 means the page is used for exactly one purpose
 *   (e.g. a private data page of one process).
 *
 * A page may be used for kmalloc() or anyone else who does a
 * __get_free_page(). In this case the page->count is at least 1, and
 * all other fields are unused but should be 0 or NULL. The
 * management of this page is the responsibility of the one who uses
 * it.
 *
 * The other pages (we may call them "process pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
 * page->mapping is the pointer to the inode, and page->index is the
 * file offset of the page, in units of PAGE_CACHE_SIZE.
 *
 * A page may have buffers allocated to it. In this case,
 * page->buffers is a circular list of these buffer heads. Else,
 * page->buffers == NULL.
 *
 * For pages belonging to inodes, the page->count is the number of
 * attaches, plus 1 if buffers are allocated to the page, plus one
 * for the page cache itself.
 *
 * All pages belonging to an inode are in these doubly linked lists:
 * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
 * using the page->list list_head. These fields are also used for
 * freelist management (when page->count == 0).
 *
 * There is also a hash table mapping (mapping,index) to the page
 * in memory if present. The lists for this hash table use the fields
 * page->next_hash and page->pprev_hash.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written to disk,
 * - private pages which have been modified may need to be swapped out
 *   to swap space and (later) to be read back into memory.
 *
 * During disk I/O, PG_locked is used. This bit is set before I/O
 * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
 * tasks waiting for the I/O on this page to complete.
 * PG_uptodate tells whether the page's contents are valid.
 * When a read completes, the page becomes uptodate, unless a disk I/O
 * error happened.
 *
 * For choosing which pages to swap out, inode pages carry a
 * PG_referenced bit, which is set any time the system accesses
 * that page through the (mapping,index) hash table. This referenced
 * bit, together with the referenced bit in the page tables, is used
 * to manipulate page->age and move the page across the active,
 * inactive_dirty and inactive_clean lists.
 *
 * Note that the referenced bit, the page->lru list_head and the
 * active, inactive_dirty and inactive_clean lists are protected by
 * the pagemap_lru_lock, and *NOT* by the usual PG_locked bit!
 *
 * PG_skip is used on sparc/sparc64 architectures to "skip" certain
 * parts of the address space.
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic
 * code guarantees that this bit is cleared for a page when it first
 * is entered into the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual
 * address space, they need to be kmapped separately for doing IO on
 * the pages. The struct page (these bits with information) are always
 * mapped into kernel address space...
 */
#define PG_locked		 0	/* Page is locked. Don't touch. */
#define PG_error		 1
#define PG_referenced		 2
#define PG_uptodate		 3
#define PG_dirty		 4
#define PG_unused		 5
#define PG_lru			 6
#define PG_active		 7
#define PG_slab			 8
#define PG_skip			10
#define PG_highmem		11
#define PG_checked		12	/* kill me in 2.5.<early>. */
#define PG_arch_1		13
#define PG_reserved		14
#define PG_launder		15	/* written out by VM pressure.. */

/* Make it prettier to test the above... */
#define UnlockPage(page)	unlock_page(page)
#define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page)	set_bit(PG_uptodate, &(page)->flags)
#define ClearPageUptodate(page)	clear_bit(PG_uptodate, &(page)->flags)
#define PageDirty(page)		test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page)	set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page)	clear_bit(PG_dirty, &(page)->flags)
#define PageLocked(page)	test_bit(PG_locked, &(page)->flags)
#define LockPage(page)		set_bit(PG_locked, &(page)->flags)
#define TryLockPage(page)	test_and_set_bit(PG_locked, &(page)->flags)
#define PageChecked(page)	test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page)	set_bit(PG_checked, &(page)->flags)
#define PageLaunder(page)	test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page)	set_bit(PG_launder, &(page)->flags)
#define ClearPageLaunder(page)	clear_bit(PG_launder, &(page)->flags)
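
/*
 * Illustrative sketch, not part of the original header: the trylock
 * pattern built from the bit macros above. TryLockPage() is the
 * non-blocking probe; UnlockPage() expands to unlock_page(), which is
 * defined in mm/filemap.c and also wakes sleepers on page_waitqueue().
 * example_trylock_page is hypothetical.
 */
static inline int example_trylock_page(struct page *page)
{
	if (TryLockPage(page))
		return 0;	/* somebody else holds PG_locked */
	/* ... the page is ours: start I/O, update PG_uptodate ... */
	UnlockPage(page);
	return 1;
}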

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */
#define NODE_SHIFT 4
#define ZONE_SHIFT (BITS_PER_LONG - 8)

struct zone_struct;
extern struct zone_struct *zone_table[];

static inline zone_t *page_zone(struct page *page)
{
	return zone_table[page->flags >> ZONE_SHIFT];
}

static inline void set_page_zone(struct page *page, unsigned long zone_num)
{
	page->flags &= ~(~0UL << ZONE_SHIFT);
	page->flags |= zone_num << ZONE_SHIFT;
}

/*
 * In order to avoid #ifdefs within C code itself, we define
 * set_page_address to a noop for non-highmem machines, where
 * the field isn't useful.
 * The same is true for page_address() in arch-dependent code.
 */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)

#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)

#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
#define set_page_address(page, address)  do { } while(0)
#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */

/*
 * Permanent address of a page. Obviously must never be
 * called on a highmem page.
 */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)

#define page_address(page) ((page)->virtual)

#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */

#define page_address(page)						\
	__va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)	\
			+ page_zone(page)->zone_start_paddr)

#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
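
/*
 * Illustrative sketch, not part of the original header: touching a
 * page's contents through page_address(). This only works for pages
 * with a permanent kernel mapping; highmem pages must be kmap()ed
 * instead. example_zero_page is hypothetical.
 */
static inline void example_zero_page(struct page *page)
{
	memset(page_address(page), 0, PAGE_SIZE);	/* lowmem pages only */
}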

extern void FASTCALL(set_page_dirty(struct page *));

/*
 * The first mb is necessary to safely close the critical section opened by the
 * TryLockPage(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page).
 */
#define PageError(page)		test_bit(PG_error, &(page)->flags)
#define SetPageError(page)	set_bit(PG_error, &(page)->flags)
#define ClearPageError(page)	clear_bit(PG_error, &(page)->flags)
#define PageReferenced(page)	test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page)	set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page)	clear_bit(PG_referenced, &(page)->flags)
#define PageTestandClearReferenced(page)	test_and_clear_bit(PG_referenced, &(page)->flags)
#define PageSlab(page)		test_bit(PG_slab, &(page)->flags)
#define PageSetSlab(page)	set_bit(PG_slab, &(page)->flags)
#define PageClearSlab(page)	clear_bit(PG_slab, &(page)->flags)
#define PageReserved(page)	test_bit(PG_reserved, &(page)->flags)

#define PageActive(page)	test_bit(PG_active, &(page)->flags)
#define SetPageActive(page)	set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page)	clear_bit(PG_active, &(page)->flags)

#define PageLRU(page)		test_bit(PG_lru, &(page)->flags)
#define TestSetPageLRU(page)	test_and_set_bit(PG_lru, &(page)->flags)
#define TestClearPageLRU(page)	test_and_clear_bit(PG_lru, &(page)->flags)

#ifdef CONFIG_HIGHMEM
#define PageHighMem(page)	test_bit(PG_highmem, &(page)->flags)
#else
#define PageHighMem(page)	0 /* needed to optimize away at compile time */
#endif

#define SetPageReserved(page)	set_bit(PG_reserved, &(page)->flags)
#define ClearPageReserved(page)	clear_bit(PG_reserved, &(page)->flags)

/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS	(NULL)
#define NOPAGE_OOM	((struct page *) (-1))

/* The array of struct pages */
extern mem_map_t * mem_map;

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */
extern struct page * FASTCALL(_alloc_pages(unsigned int gfp_mask, unsigned int order));
extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist));
extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order);

static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	/*
	 * Gets optimized away by the compiler.
	 */
	if (order >= MAX_ORDER)
		return NULL;
	return _alloc_pages(gfp_mask, order);
}

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask),0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA,(order))

/*
 * The old interface name will be removed in 2.5:
 */
#define get_free_page get_zeroed_page

/*
 * There is only one 'core' page-freeing function.
 */
extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
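
/*
 * Illustrative sketch, not part of the original header: matched use of
 * the two allocator namespaces and their freeing counterparts above.
 * example_page_roundtrip is hypothetical; gfp_mask would typically be
 * GFP_KERNEL (defined further down).
 */
static inline int example_page_roundtrip(unsigned int gfp_mask)
{
	struct page *page = alloc_page(gfp_mask);	/* struct page * flavour */
	unsigned long addr = __get_free_page(gfp_mask);	/* virtual address flavour */

	if (page)
		__free_page(page);
	if (addr)
		free_page(addr);
	return (page && addr) ? 0 : -ENOMEM;
}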

extern void show_free_areas(void);
extern void show_free_areas_node(pg_data_t *pgdat);

extern void clear_page_tables(struct mm_struct *, unsigned long, int);

extern int fail_writepage(struct page *);
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
struct file *shmem_file_setup(char * name, loff_t size);
extern void shmem_lock(struct file * file, int lock);
extern int shmem_zero_setup(struct vm_area_struct *);

extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern int vmtruncate(struct inode * inode, loff_t offset);
extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access);
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);

/*
 * On a two-level page table, this ends up being trivial. Thus the
 * inlining and the symmetry break with pte_alloc() that does all
 * of this out-of-line.
 */
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	if (pgd_none(*pgd))
		return __pmd_alloc(mm, pgd, address);
	return pmd_offset(pgd, address);
}
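
/*
 * Illustrative sketch, not part of the original header: the usual
 * top-down walk when faulting in an address, combining pmd_alloc()
 * above with pte_alloc(). example_alloc_pte is hypothetical and assumes
 * the caller holds mm->page_table_lock, as the fault path does.
 */
static inline pte_t *example_alloc_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd = pmd_alloc(mm, pgd, address);

	if (!pmd)
		return NULL;	/* out of memory */
	return pte_alloc(mm, pmd, address);
}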

extern int pgt_cache_water[2];
extern int check_pgt_cache(void);

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long * zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void swapin_readahead(swp_entry_t);

extern struct address_space swapper_space;
#define PageSwapCache(page) ((page)->mapping == &swapper_space)

static inline int is_page_cache_freeable(struct page * page)
{
	return page_count(page) - !!page->buffers == 1;
}

extern int can_share_swap_page(struct page *);
extern int remove_exclusive_swap_page(struct page *);

extern void __free_pte(pte_t);

/* mmap.c */
extern void lock_vma_mappings(struct vm_area_struct *);
extern void unlock_vma_mappings(struct vm_area_struct *);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void build_mmap_rb(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff);

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	unsigned long ret = -EINVAL;
	if ((offset + PAGE_ALIGN(len)) < offset)
		goto out;
	if (!(offset & ~PAGE_MASK))
		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
	return ret;
}

extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

static inline void __vma_unlink(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev)
{
	prev->vm_next = vma->vm_next;
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	if (mm->mmap_cache == vma)
		mm->mmap_cache = prev;
}

static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags)
{
	if (!vma->vm_file && vma->vm_flags == vm_flags)
		return 1;
	else
		return 0;
}

struct zone_t;
/* filemap.c */
extern void remove_inode_page(struct page *);
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_sync(struct vm_area_struct *, unsigned long, size_t, unsigned int);
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);

/*
 * GFP bitmasks..
 */
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low four bits) */
#define __GFP_DMA	0x01
#define __GFP_HIGHMEM	0x02

/* Action modifiers - doesn't change the zoning */
#define __GFP_WAIT	0x10	/* Can wait and reschedule? */
#define __GFP_HIGH	0x20	/* Should access emergency pools? */
#define __GFP_IO	0x40	/* Can start low memory physical IO? */
#define __GFP_HIGHIO	0x80	/* Can start high mem physical IO? */
#define __GFP_FS	0x100	/* Can call down to low-level FS? */

#define GFP_NOHIGHIO	(__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_NOIO	(__GFP_HIGH | __GFP_WAIT)
#define GFP_NOFS	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_HIGHUSER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_KSWAPD	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

static inline unsigned int pf_gfp_mask(unsigned int gfp_mask)
{
	/* avoid all memory balancing I/O methods if this task cannot block on I/O */
	if (current->flags & PF_NOIO)
		gfp_mask &= ~(__GFP_IO | __GFP_HIGHIO | __GFP_FS);

	return gfp_mask;
}
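
/*
 * Illustrative sketch, not part of the original header: picking a GFP
 * mask and filtering it through pf_gfp_mask() before allocating, the
 * way the allocator entry points expect. example_gfp_alloc is
 * hypothetical.
 */
static inline struct page *example_gfp_alloc(int can_sleep)
{
	unsigned int gfp_mask = can_sleep ? GFP_KERNEL : GFP_ATOMIC;

	return alloc_page(pf_gfp_mask(gfp_mask));
}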

/* vma is the first one with  address < vma->vm_end,
 * and even  address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in write mode. We need to get
	 * the spinlock only before relocating the vma range ourselves.
	 */
	address &= PAGE_MASK;
	spin_lock(&vma->vm_mm->page_table_lock);
	grow = (vma->vm_start - address) >> PAGE_SHIFT;
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) {
		spin_unlock(&vma->vm_mm->page_table_lock);
		return -ENOMEM;
	}
	vma->vm_start = address;
	vma->vm_pgoff -= grow;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);

extern struct page * vmalloc_to_page(void *addr);

#endif /* __KERNEL__ */

#endif /* _LINUX_MM_H */