Lines Matching refs:mapping (only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/mm/)

65  *        ->mapping->tree_lock
73 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
86 * ->mapping->tree_lock (__sync_single_inode)
117 * is safe. The caller must hold the mapping's tree_lock.
121 struct address_space *mapping = page->mapping;
123 radix_tree_delete(&mapping->page_tree, page->index);
124 page->mapping = NULL;
125 mapping->nrpages--;
138 if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
140 dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
146 struct address_space *mapping = page->mapping;
150 spin_lock_irq(&mapping->tree_lock);
152 spin_unlock_irq(&mapping->tree_lock);
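
The matches at kernel lines 117-152 are the page-cache removal path. A minimal sketch of the locking pattern they show, using the 2.6.36 helpers named above (the sketch_* name is hypothetical):

    #include <linux/pagemap.h>
    #include <linux/spinlock.h>

    /* remove_from_page_cache() (lines 146-152) brackets the tree update
     * with tree_lock, IRQs off; __remove_from_page_cache() (lines 121-140)
     * assumes the caller already did, per the kernel-doc at line 117. */
    static void sketch_remove_from_cache(struct page *page)
    {
        struct address_space *mapping = page->mapping;

        /* the caller is also expected to hold the page lock */
        spin_lock_irq(&mapping->tree_lock);
        __remove_from_page_cache(page);    /* radix_tree_delete + nrpages-- */
        spin_unlock_irq(&mapping->tree_lock);
    }
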
159 struct address_space *mapping;
186 mapping = page_mapping(page);
187 if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
188 mapping->a_ops->sync_page(page);
200 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
201 * @mapping: address space structure to write
206 * Start writeback against all of a mapping's dirty pages that lie
214 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
225 if (!mapping_cap_writeback_dirty(mapping))
228 ret = do_writepages(mapping, &wbc);
232 static inline int __filemap_fdatawrite(struct address_space *mapping,
235 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
238 int filemap_fdatawrite(struct address_space *mapping)
240 return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
244 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
247 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
253 * @mapping: target address_space
258 int filemap_flush(struct address_space *mapping)
260 return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
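
Lines 214-260 show the whole fdatawrite family funneling into do_writepages(). A hedged reconstruction of that funnel; the writeback_control fields are inferred from the calls visible above and the 2.6.36 struct:

    #include <linux/writeback.h>
    #include <linux/backing-dev.h>

    static int sketch_fdatawrite_range(struct address_space *mapping,
                                       loff_t start, loff_t end, int sync_mode)
    {
        struct writeback_control wbc = {
            .sync_mode   = sync_mode,    /* WB_SYNC_ALL waits on each page */
            .nr_to_write = LONG_MAX,     /* write everything in the range */
            .range_start = start,
            .range_end   = end,
        };

        /* line 225: some backing devices keep no dirty state at all */
        if (!mapping_cap_writeback_dirty(mapping))
            return 0;

        return do_writepages(mapping, &wbc);
    }

filemap_fdatawrite() (line 238) is then just this over the whole file with WB_SYNC_ALL, and filemap_flush() (line 258) is the non-blocking WB_SYNC_NONE variant.
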
266 * @mapping: address space structure to wait for
273 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
287 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
308 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
310 if (test_and_clear_bit(AS_EIO, &mapping->flags))
319 * @mapping: address space structure to wait for
324 int filemap_fdatawait(struct address_space *mapping)
326 loff_t i_size = i_size_read(mapping->host);
331 return filemap_fdatawait_range(mapping, 0, i_size - 1);
335 int filemap_write_and_wait(struct address_space *mapping)
339 if (mapping->nrpages) {
340 err = filemap_fdatawrite(mapping);
348 int err2 = filemap_fdatawait(mapping);
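
Lines 335-348 compose the two halves and keep the first error; judging by line 348, the wait still runs even when the write returned an error, since partial writeback is worth waiting for. A sketch under the assumption, true in mainline 2.6.36, that only -EIO short-circuits the wait:

    static int sketch_write_and_wait(struct address_space *mapping)
    {
        int err = 0;

        if (mapping->nrpages) {          /* line 339: nothing cached, nothing to do */
            err = filemap_fdatawrite(mapping);
            if (err != -EIO) {           /* -EIO: don't wait on a dead device */
                int err2 = filemap_fdatawait(mapping);
                if (!err)
                    err = err2;          /* report the first failure */
            }
        }
        return err;
    }
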
359 * @mapping: the address_space for the pages
368 int filemap_write_and_wait_range(struct address_space *mapping,
373 if (mapping->nrpages) {
374 err = __filemap_fdatawrite_range(mapping, lstart, lend,
378 int err2 = filemap_fdatawait_range(mapping,
391 * @mapping: the page's address_space
398 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
413 page->mapping = mapping;
416 spin_lock_irq(&mapping->tree_lock);
417 error = radix_tree_insert(&mapping->page_tree, offset, page);
419 mapping->nrpages++;
423 spin_unlock_irq(&mapping->tree_lock);
425 page->mapping = NULL;
426 spin_unlock_irq(&mapping->tree_lock);
438 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
449 if (mapping_cap_swap_backed(mapping))
452 ret = add_to_page_cache(page, mapping, offset, gfp_mask);
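
Lines 398-426 insert a page into the radix tree under tree_lock, with the failure path unhooking page->mapping again. A simplified sketch; it deliberately omits the page refcounting, radix_tree_preload() and memory-cgroup charging that the real add_to_page_cache_locked() also performs:

    #include <linux/pagemap.h>
    #include <linux/radix-tree.h>

    static int sketch_add_to_cache_locked(struct page *page,
                                          struct address_space *mapping,
                                          pgoff_t offset)
    {
        int error;

        page->mapping = mapping;         /* line 413: hook the page up first */
        page->index = offset;

        spin_lock_irq(&mapping->tree_lock);
        error = radix_tree_insert(&mapping->page_tree, offset, page);
        if (!error)
            mapping->nrpages++;          /* line 419 */
        else
            page->mapping = NULL;        /* line 425: undo on failure */
        spin_unlock_irq(&mapping->tree_lock);

        return error;
    }
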
607 * on the page's mapping.
618 * @mapping: the address_space to search
621 * Is there a pagecache struct page at the given (mapping, offset) tuple?
624 struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
632 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
662 * @mapping: the address_space to search
670 struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
675 page = find_get_page(mapping, offset);
679 if (unlikely(page->mapping != mapping)) {
692 * @mapping: the page's address_space
693 * @index: the page's index into the mapping
707 struct page *find_or_create_page(struct address_space *mapping,
713 page = find_lock_page(mapping, index);
724 err = add_to_page_cache_lru(page, mapping, index,
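
find_or_create_page() (lines 707-724) is how most consumers pin a cache page: it returns the page locked and referenced, allocating and inserting one if nothing is cached at that index. A hypothetical caller:

    #include <linux/pagemap.h>

    static int sketch_touch_index(struct address_space *mapping, pgoff_t index)
    {
        struct page *page;

        page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
        if (!page)
            return -ENOMEM;              /* allocation or insertion failed */

        /* ... page is locked and referenced here; fill or modify it ... */

        unlock_page(page);
        page_cache_release(page);        /* drop the reference we were given */
        return 0;
    }
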
739 * @mapping: The address_space to search
745 * @nr_pages pages in the mapping. The pages are placed at @pages.
748 * The search returns a group of mapping-contiguous pages with ascending
753 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
762 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
795 * @mapping: The address_space to search
805 unsigned BCMFASTPATH_HOST find_get_pages_contig(struct address_space *mapping, pgoff_t index,
814 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
826 if (page->mapping == NULL || page->index != index)
849 * @mapping: the address_space to search
858 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
867 nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
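
The three gang lookups (lines 753-867) all sit on radix_tree_gang_lookup*_slot(); callers normally reach them through the pagevec layer, as filemap_fdatawait_range() does at line 287. A sketch of that iteration idiom, assuming the 2.6.36 pagevec API:

    #include <linux/pagevec.h>

    static void sketch_walk_mapping(struct address_space *mapping)
    {
        struct pagevec pvec;
        pgoff_t index = 0;
        unsigned i, nr;

        pagevec_init(&pvec, 0);
        while ((nr = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE))) {
            for (i = 0; i < nr; i++) {
                struct page *page = pvec.pages[i];

                index = page->index + 1; /* resume after this page */
                /* ... inspect page; the lookup already took a reference ... */
            }
            pagevec_release(&pvec);      /* drop the gang's references */
        }
    }
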
902 * @mapping: target address_space
914 grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
916 struct page *page = find_get_page(mapping, index);
924 page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
925 if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
962 * mapping->a_ops->readpage() function for the actual low-level stuff.
970 struct address_space *mapping = filp->f_mapping;
971 struct inode *inode = mapping->host;
994 page = find_get_page(mapping, index);
996 page_cache_sync_readahead(mapping,
999 page = find_get_page(mapping, index);
1004 page_cache_async_readahead(mapping,
1010 !mapping->a_ops->is_partially_uptodate)
1015 if (!page->mapping)
1017 if (!mapping->a_ops->is_partially_uptodate(page,
1054 if (mapping_writably_mapped(mapping))
1094 if (!page->mapping) {
1114 error = mapping->a_ops->readpage(filp, page);
1129 if (page->mapping == NULL) {
1158 page = page_cache_alloc_cold(mapping);
1163 error = add_to_page_cache_lru(page, mapping,
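
Lines 962-1163 are do_generic_file_read(): look the page up, fire readahead when it is missing, and fall back to ->readpage() when it is cached but not uptodate. That leg re-checks page->mapping after every sleep because truncate can yank the page (lines 1015, 1094, 1129). A condensed sketch; the -EAGAIN retry convention is hypothetical, and AOP_TRUNCATED_PAGE handling is omitted:

    static int sketch_bring_uptodate(struct file *filp, struct page *page)
    {
        struct address_space *mapping = filp->f_mapping;
        int error;

        lock_page(page);
        if (!page->mapping) {            /* raced with truncate; retry lookup */
            unlock_page(page);
            return -EAGAIN;
        }
        if (PageUptodate(page)) {        /* someone else read it for us */
            unlock_page(page);
            return 0;
        }

        error = mapping->a_ops->readpage(filp, page);
        if (error)
            return error;

        wait_on_page_locked(page);       /* ->readpage unlocks at I/O end */
        return PageUptodate(page) ? 0 : -EIO;
    }
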
1289 struct address_space *mapping;
1292 mapping = filp->f_mapping;
1293 inode = mapping->host;
1298 retval = filemap_write_and_wait_range(mapping, pos,
1301 retval = mapping->a_ops->direct_IO(READ, iocb,
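
Lines 1289-1301 are the O_DIRECT branch of the generic read path: dirty cached pages over the range are flushed first so the device read cannot return stale data. A sketch against the 2.6.36 ->direct_IO signature:

    #include <linux/aio.h>
    #include <linux/pagemap.h>

    static ssize_t sketch_dio_read(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos, size_t count)
    {
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        ssize_t retval;

        /* line 1298: push dirty page-cache data out before reading past it */
        retval = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
        if (retval)
            return retval;

        return mapping->a_ops->direct_IO(READ, iocb, iov, pos, nr_segs);
    }
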
1363 do_readahead(struct address_space *mapping, struct file *filp,
1366 if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1369 force_page_cache_readahead(mapping, filp, index, nr);
1382 struct address_space *mapping = file->f_mapping;
1386 ret = do_readahead(mapping, file, start, len);
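
Lines 1363-1386 back the readahead(2) system call; the guard at line 1366 refuses mappings that cannot populate pages at all. Essentially:

    static int sketch_do_readahead(struct address_space *mapping,
                                   struct file *filp,
                                   pgoff_t index, unsigned long nr)
    {
        /* no ->readpage means no way to fill the cache */
        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
            return -EINVAL;

        force_page_cache_readahead(mapping, filp, index, nr);
        return 0;
    }
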
1411 struct address_space *mapping = file->f_mapping;
1416 page = page_cache_alloc_cold(mapping);
1420 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1422 ret = mapping->a_ops->readpage(file, page);
1445 struct address_space *mapping = file->f_mapping;
1453 page_cache_sync_readahead(mapping, ra, file, offset,
1476 ra_submit(ra, mapping, file);
1490 struct address_space *mapping = file->f_mapping;
1498 page_cache_async_readahead(mapping, ra, file,
1518 struct address_space *mapping = file->f_mapping;
1520 struct inode *inode = mapping->host;
1533 page = find_get_page(mapping, offset);
1543 if (unlikely(page->mapping != mapping)) {
1554 page = find_lock_page(mapping, offset);
1613 error = mapping->a_ops->readpage(file, page);
1638 struct address_space *mapping = file->f_mapping;
1640 if (!mapping->a_ops->readpage)
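
Lines 1411-1640 are the fault side: page_cache_read() (around line 1416) pulls a missing page in, filemap_fault() maps cache lookups onto page faults, and generic_file_mmap() refuses files without ->readpage (line 1640). A sketch of how a filesystem plugs into this, with hypothetical sketch_* names:

    #include <linux/fs.h>
    #include <linux/mm.h>

    static const struct vm_operations_struct sketch_vm_ops = {
        .fault = filemap_fault,          /* page cache services the faults */
    };

    static int sketch_mmap(struct file *file, struct vm_area_struct *vma)
    {
        /* same guard as generic_file_mmap(), per line 1640 */
        if (!file->f_mapping->a_ops->readpage)
            return -ENOEXEC;

        vma->vm_ops = &sketch_vm_ops;
        return 0;
    }
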
1671 static struct page *__read_cache_page(struct address_space *mapping,
1680 page = find_get_page(mapping, index);
1685 err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
1702 static struct page *do_read_cache_page(struct address_space *mapping,
1713 page = __read_cache_page(mapping, index, filler, data, gfp);
1720 if (!page->mapping) {
1741 * @mapping: the page's address_space
1754 struct page *read_cache_page_async(struct address_space *mapping,
1759 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
1777 * @mapping: the page's address_space
1781 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
1789 struct page *read_cache_page_gfp(struct address_space *mapping,
1793 filler_t *filler = (filler_t *)mapping->a_ops->readpage;
1795 return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
1801 * @mapping: the page's address_space
1811 struct page *read_cache_page(struct address_space *mapping,
1816 return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
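
read_cache_page() and friends (lines 1671-1816) wrap the whole find-or-allocate-then-fill dance behind one call; line 1793 shows the mapping's own ->readpage being used as the filler, which is exactly what the read_mapping_page() convenience wrapper does. A hypothetical consumer:

    #include <linux/err.h>
    #include <linux/pagemap.h>

    static int sketch_read_one(struct address_space *mapping, pgoff_t index)
    {
        struct page *page = read_mapping_page(mapping, index, NULL);

        if (IS_ERR(page))                /* allocation failed or filler errored */
            return PTR_ERR(page);

        /* ... page is uptodate here; kmap() and use it ... */

        page_cache_release(page);        /* balance the reference from the read */
        return 0;
    }
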
2106 int pagecache_write_begin(struct file *file, struct address_space *mapping,
2110 const struct address_space_operations *aops = mapping->a_ops;
2112 return aops->write_begin(file, mapping, pos, len, flags,
2117 int pagecache_write_end(struct file *file, struct address_space *mapping,
2121 const struct address_space_operations *aops = mapping->a_ops;
2124 return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2134 struct address_space *mapping = file->f_mapping;
2135 struct inode *inode = mapping->host;
2146 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2156 if (mapping->nrpages) {
2157 written = invalidate_inode_pages2_range(mapping,
2170 written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2180 if (mapping->nrpages) {
2181 invalidate_inode_pages2_range(mapping,
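
Lines 2134-2181 are the O_DIRECT write path: flush the range, invalidate the cached pages so they cannot go stale, issue ->direct_IO, then invalidate again in case pages were faulted back in mid-write. A condensed sketch:

    static ssize_t sketch_dio_write(struct kiocb *iocb, const struct iovec *iov,
                                    unsigned long nr_segs, loff_t pos, size_t len)
    {
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        pgoff_t end = (pos + len - 1) >> PAGE_CACHE_SHIFT;
        ssize_t written;

        written = filemap_write_and_wait_range(mapping, pos, pos + len - 1);
        if (written)
            return written;

        if (mapping->nrpages) {          /* line 2156: drop cached copies */
            written = invalidate_inode_pages2_range(mapping,
                                pos >> PAGE_CACHE_SHIFT, end);
            if (written)
                return written;
        }

        written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, nr_segs);

        if (mapping->nrpages)            /* line 2180: re-invalidate after I/O */
            invalidate_inode_pages2_range(mapping,
                                pos >> PAGE_CACHE_SHIFT, end);
        return written;
    }
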
2202 struct page *grab_cache_page_write_begin(struct address_space *mapping,
2211 page = find_lock_page(mapping, index);
2215 page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
2218 status = add_to_page_cache_lru(page, mapping, index,
2233 struct address_space *mapping = file->f_mapping;
2234 const struct address_space_operations *a_ops = mapping->a_ops;
2273 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2278 if (mapping_writably_mapped(mapping))
2287 status = a_ops->write_end(file, mapping, pos, bytes, copied,
2312 balance_dirty_pages_ratelimited(mapping);
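
Lines 2202-2312 are the buffered-write engine: ->write_begin() hands back a locked cache page, the caller copies into it, ->write_end() commits the copy, and balance_dirty_pages_ratelimited() throttles heavy dirtiers. A single-chunk sketch that assumes a kernel-space source buffer and that offset + bytes stays within one page (the real loop copies from userspace with iov_iter helpers):

    #include <linux/highmem.h>
    #include <linux/pagemap.h>
    #include <linux/string.h>
    #include <linux/writeback.h>

    static int sketch_write_one_chunk(struct file *file, const char *buf,
                                      unsigned bytes, loff_t pos)
    {
        struct address_space *mapping = file->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        struct page *page;
        void *fsdata;
        int status;

        status = a_ops->write_begin(file, mapping, pos, bytes, 0,
                                    &page, &fsdata);
        if (status)
            return status;

        memcpy(kmap(page) + offset, buf, bytes);  /* the "copy" step */
        kunmap(page);
        flush_dcache_page(page);

        status = a_ops->write_end(file, mapping, pos, bytes, bytes,
                                  page, fsdata);  /* unlocks + dirties the page */
        if (status < 0)
            return status;

        balance_dirty_pages_ratelimited(mapping); /* line 2312 */
        return 0;
    }
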
2363 struct address_space * mapping = file->f_mapping;
2366 struct inode *inode = mapping->host;
2382 current->backing_dev_info = mapping->backing_dev_info;
2437 invalidate_mapping_pages(mapping,
2510 struct address_space * const mapping = page->mapping;
2516 if (mapping && mapping->a_ops->releasepage)
2517 return mapping->a_ops->releasepage(page, gfp_mask);
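
try_to_release_page() (lines 2510-2517) lets the filesystem veto dropping page-private data under the given gfp mask. A hypothetical caller, following the usual rule that the page must be locked:

    static int sketch_shed_private(struct page *page)
    {
        /* the caller holds the page lock */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
            return -EBUSY;               /* fs refused; leave the page cached */
        return 0;
    }
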