#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Page-mapping primitive inline functions
 *
 * Copyright 1995 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <linux/highmem.h>

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
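
/*
 * Illustrative sketch (not part of this header; "pos" and "count" are
 * hypothetical caller variables): a file position splits into a page-cache
 * index plus an offset within the page, and a byte count rounds up to
 * whole page-cache pages:
 *
 *	unsigned long index  = pos >> PAGE_CACHE_SHIFT;
 *	unsigned long offset = pos & ~PAGE_CACHE_MASK;
 *	unsigned long bytes  = PAGE_CACHE_ALIGN(count);
 */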

#define page_cache_get(x)	get_page(x)
#define page_cache_release(x)	__free_page(x)

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(x->gfp_mask, 0);
}

/*
 * From a kernel address, get the "struct page *"
 */
#define page_cache_entry(x)	virt_to_page(x)

extern unsigned int page_hash_bits;
#define PAGE_HASH_BITS (page_hash_bits)
#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)

extern atomic_t page_cache_size; /* # of pages currently in the hash table */
extern struct page **page_hash_table;

extern void page_cache_init(unsigned long);

/*
 * We use a power-of-two hash table to avoid a modulus,
 * and get a reasonable hash by knowing roughly how the
 * inode pointer and indexes are distributed (ie, we
 * roughly know which bits are "significant")
 *
 * For the time being it will work for struct address_space too (most of
 * them sitting inside the inodes). We might want to change it later.
 */
static inline unsigned long _page_hashfn(struct address_space * mapping, unsigned long index)
{
/*
 * "i" strips low-order pointer bits by dividing by the largest power of
 * two that divides sizeof(struct inode); "s" folds the bits above
 * PAGE_HASH_BITS back down so they contribute to the bucket number
 * before masking.
 */
#define i (((unsigned long) mapping)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
	return s(i+index) & (PAGE_HASH_SIZE-1);
#undef i
#undef s
}

#define page_hash(mapping,index) (page_hash_table+_page_hashfn(mapping,index))

extern struct page * __find_get_page(struct address_space *mapping,
				unsigned long index, struct page **hash);
#define find_get_page(mapping, index) \
	__find_get_page(mapping, index, page_hash(mapping, index))
extern struct page * __find_lock_page(struct address_space * mapping,
				unsigned long index, struct page **hash);
extern struct page * find_or_create_page(struct address_space *mapping,
				unsigned long index, unsigned int gfp_mask);

extern void FASTCALL(lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));
#define find_lock_page(mapping, index) \
	__find_lock_page(mapping, index, page_hash(mapping, index))
extern struct page *find_trylock_page(struct address_space *, unsigned long);

extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
extern void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index);
extern int add_to_page_cache_unique(struct page * page, struct address_space *mapping, unsigned long index, struct page **hash);
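
/*
 * Rough sketch of how the pieces above combine (illustrative only; real
 * callers differ in locking and retry details, and "repeat_lookup" is a
 * hypothetical label): a page missing from the cache is allocated, then
 * inserted under the bucket the lookup used, and only filled afterwards.
 * A non-zero return from add_to_page_cache_unique() means another page
 * got there first, so the new one is dropped:
 *
 *	struct page **hash = page_hash(mapping, index);
 *	struct page *page = page_cache_alloc(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	if (add_to_page_cache_unique(page, mapping, index, hash)) {
 *		page_cache_release(page);
 *		goto repeat_lookup;
 *	}
 */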

extern void ___wait_on_page(struct page *);

static inline void wait_on_page(struct page * page)
{
	if (PageLocked(page))
		___wait_on_page(page);
}
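
/*
 * Illustrative sketch: unlike lock_page(), wait_on_page() only waits for
 * the lock bit to clear (e.g. for in-flight I/O to finish), it does not
 * take the lock itself.  A reader might do:
 *
 *	wait_on_page(page);
 *	if (!Page_Uptodate(page))
 *		return -EIO;
 */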

/*
 * Returns a locked page at the given index in the given mapping,
 * creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping->gfp_mask);
}
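
/*
 * Typical caller pattern (illustrative sketch only): the page comes back
 * locked and with a reference held, so the caller drops both when done:
 *
 *	struct page *page = grab_cache_page(mapping, index);
 *	if (!page)
 *		return -ENOMEM;
 *	... fill or modify the page while it is locked ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */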

extern struct page * grab_cache_page_nowait(struct address_space *, unsigned long);

typedef int filler_t(void *, struct page *);

extern struct page *read_cache_page(struct address_space *, unsigned long,
				filler_t *, void *);
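
/*
 * Illustrative use (a sketch mirroring common callers, not a fixed rule):
 * the filler is whatever brings the page uptodate, often the mapping's own
 * readpage method, and errors come back as an ERR_PTR() rather than NULL:
 *
 *	page = read_cache_page(mapping, index,
 *			       (filler_t *)mapping->a_ops->readpage, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */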
#endif