Searched refs:pages (Results 201 - 225 of 933) sorted by relevance


/linux-master/lib/
iov_iter.c
126 * some or all of the pages in @i aren't in memory.
334 * However, we mostly deal with order-0 pages and thus can
564 * range since we might then be straying into pages that
643 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
650 * Set up an I/O iterator to either draw data out of the pages attached to an
651 * inode or to inject data into those pages. The pages *must* be prevented
890 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, argument
908 pages[ret] = find_subpage(page, xas.xa_index);
909 get_page(pages[ret]);
917 iter_xarray_get_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned maxpages, size_t *_start_offset) argument
981 __iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, size_t *start) argument
1042 iov_iter_get_pages2(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start) argument
1053 iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start) argument
1405 iov_iter_extract_xarray_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) argument
1451 iov_iter_extract_bvec_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) argument
1494 iov_iter_extract_kvec_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) argument
1558 iov_iter_extract_user_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) argument
1634 iov_iter_extract_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) argument
[all...]
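
A hedged usage sketch for iov_iter_get_pages2(), whose full signature appears in the hits above; the wrapper function, the 16-page batch size, and the put_page() release path are assumptions modelled on typical callers, not code from this file.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>

/* Pin a batch of pages backing the next data in @iter, do I/O against
 * them, then drop the page references the iterator code took for us. */
static ssize_t with_pinned_pages(struct iov_iter *iter)
{
	struct page *pages[16];
	size_t start;
	ssize_t bytes;
	int i, npages;

	/* *start receives the offset of the data within the first page;
	 * the iterator is advanced past the pinned range */
	bytes = iov_iter_get_pages2(iter, pages, 16 * PAGE_SIZE, 16, &start);
	if (bytes <= 0)
		return bytes;
	npages = DIV_ROUND_UP(start + bytes, PAGE_SIZE);

	/* ... perform I/O against pages[0..npages) here ... */

	for (i = 0; i < npages; i++)
		put_page(pages[i]);	/* release the references taken above */
	return bytes;
}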
/linux-master/block/
bio-integrity.c
45 * metadata. nr_vecs specifies the maximum number of pages containing
303 static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages, argument
311 struct folio *folio = page_folio(pages[i]);
317 if (page_folio(pages[j]) != folio ||
318 pages[j] != pages[j - 1] + 1)
320 unpin_user_page(pages[j]);
325 bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
338 struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages; local
364 pages
[all...]
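
The bvec_from_pages() hits above show runs of physically contiguous pages within one folio being folded into a single bio_vec. A reconstruction of that loop as a hedged sketch (the helper name is invented, and the elided parts are filled by assumption from the visible lines):

#include <linux/bvec.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static unsigned int coalesce_into_bvecs(struct bio_vec *bvec,
					struct page **pages, int nr_pages,
					size_t bytes, u32 offset)
{
	unsigned int nr_bvecs = 0;
	int i, j;

	for (i = 0; i < nr_pages; i = j) {
		size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
		struct folio *folio = page_folio(pages[i]);

		bytes -= size;
		for (j = i + 1; j < nr_pages && bytes; j++) {
			size_t next = min_t(size_t, bytes, PAGE_SIZE);

			/* stop at a folio boundary or a physical gap */
			if (page_folio(pages[j]) != folio ||
			    pages[j] != pages[j - 1] + 1)
				break;
			unpin_user_page(pages[j]);	/* merged: pin no longer needed */
			size += next;
			bytes -= next;
		}
		bvec_set_page(&bvec[nr_bvecs++], pages[i], size, offset);
		offset = 0;	/* only the first bvec starts mid-page */
	}
	return nr_bvecs;
}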
/linux-master/mm/kmsan/
kmsan_test.c
314 * initialized pages is still considered as initialized.
320 struct page **pages; local
323 kunit_info(test, "pages initialized via vmap (no reports)\n");
325 pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
327 pages[i] = alloc_page(GFP_KERNEL);
328 vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
331 kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);
336 if (pages[i])
337 __free_page(pages[i]);
[all...]
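
The kmsan_test hits exercise a common pattern: allocate order-0 pages one by one, vmap() them into a contiguous kernel mapping, then unmap and free. A self-contained reconstruction as a hedged sketch (function names and error handling are assumptions):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void vunmap_fresh_pages(void *vbuf, struct page **pages, int npages)
{
	int i;

	if (vbuf)
		vunmap(vbuf);
	for (i = 0; i < npages; i++)
		if (pages[i])
			__free_page(pages[i]);
	kfree(pages);
}

static void *vmap_fresh_pages(struct page ***pagesp, int npages)
{
	struct page **pages;
	void *vbuf;
	int i;

	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			vunmap_fresh_pages(NULL, pages, i);
			return NULL;
		}
	}
	/* VM_MAP + PAGE_KERNEL yields an ordinary cached kernel mapping */
	vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	if (!vbuf) {
		vunmap_fresh_pages(NULL, pages, npages);
		return NULL;
	}
	*pagesp = pages;
	return vbuf;
}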
/linux-master/drivers/gpu/drm/
drm_cache.c
66 static void drm_cache_flush_clflush(struct page *pages[], argument
73 drm_clflush_page(*pages++);
79 * drm_clflush_pages - Flush dcache lines of a set of pages.
80 * @pages: List of pages to be flushed.
81 * @num_pages: Number of pages in the array.
87 drm_clflush_pages(struct page *pages[], unsigned long num_pages) argument
92 drm_cache_flush_clflush(pages, num_pages);
103 struct page *page = pages[i];
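
A minimal usage sketch for drm_clflush_pages() as documented above (the caller context is assumed): flush CPU dcache lines for pages a device is about to read uncached.

#include <drm/drm_cache.h>
#include <linux/mm.h>

static void publish_pages_to_device(struct page **pages,
				    unsigned long num_pages)
{
	/* on x86 this clflushes each page's cachelines (with a wbinvd
	 * fallback); architectures without such a primitive may only
	 * warn, so coherent mappings are preferred where available */
	drm_clflush_pages(pages, num_pages);
}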
/linux-master/drivers/hwmon/pmbus/
isl68137.c
182 .pages = 3,
234 info->pages = 2;
243 info->pages = 1;
251 info->pages = 2;
258 info->pages = 1;
/linux-master/fs/netfs/
iterator.c
17 * netfs_extract_user_iter - Extract the pages from a user iterator into a bvec
28 * allowed on the pages extracted.
41 struct page **pages; local
63 pg_size = array_size(max_pages, sizeof(*pages));
64 pages = (void *)bv + bv_size - pg_size;
67 ret = iov_iter_extract_pages(orig, &pages, count,
71 pr_err("Couldn't get user pages (rc=%zd)\n", ret);
92 bvec_set_page(bv + npages + i, *pages++, len - offset, offset);
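
The netfs_extract_user_iter() hits show a space-saving trick: a single allocation holds the bio_vec array, with the page-pointer scratch array placed in its tail. A hedged sketch of that layout (the helper name is invented):

#include <linux/bvec.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct bio_vec *alloc_bvec_with_page_tail(unsigned int max_pages,
						 struct page ***pagesp)
{
	size_t bv_size = array_size(max_pages, sizeof(struct bio_vec));
	size_t pg_size = array_size(max_pages, sizeof(struct page *));
	struct bio_vec *bv = kvmalloc(bv_size, GFP_KERNEL);

	if (!bv)
		return NULL;
	/* The page pointers live in the tail of the bvec buffer.  A
	 * bio_vec is larger than a page pointer, and each pointer is
	 * read before the bvec covering it is written, so building
	 * bvecs from the front never clobbers unconsumed pointers. */
	*pagesp = (void *)bv + bv_size - pg_size;
	return bv;
}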
/linux-master/arch/s390/kernel/
vdso.c
241 int pages = (end - start) >> PAGE_SHIFT; local
245 pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
248 for (i = 0; i < pages; i++)
255 vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
257 vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
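
The vdso.c hits translate a kernel virtual range into a NULL-terminated page array for a special mapping. A reconstruction as a hedged sketch (the shape follows vdso_setup_pages() above; the name and error handling are assumptions):

#include <linux/mm.h>
#include <linux/slab.h>

static struct page **setup_page_list(void *start, void *end)
{
	int i, pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;

	/* one extra, zeroed slot NULL-terminates the list */
	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		return NULL;
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}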
/linux-master/net/sunrpc/
xdr.c
130 kaddr = kmap_atomic(buf->pages[0]);
153 bvec_set_page(&buf->bvec[i], buf->pages[i], PAGE_SIZE,
189 struct page **pages = xdr->pages; local
196 bvec_set_page(bvec++, *pages++, len, offset);
221 * @pages: vector of struct page pointers
228 struct page **pages, unsigned int base, unsigned int len)
237 xdr->pages = pages;
253 * @pages
227 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, struct page **pages, unsigned int base, unsigned int len) argument
266 _shift_data_left_pages(struct page **pages, size_t pgto_base, size_t pgfrom_base, size_t len) argument
331 _shift_data_right_pages(struct page **pages, size_t pgto_base, size_t pgfrom_base, size_t len) argument
395 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) argument
442 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) argument
493 struct page **pages = buf->pages; local
999 xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, struct rpc_rqst *rqst) argument
1290 xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, unsigned int len) argument
1452 xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, unsigned int len) argument
[all...]
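
Several xdr.c hits wrap the pages of an xdr_buf in bio_vecs (the bvec_set_page() calls above). A hedged sketch of that conversion (the helper name and loop bounds are assumptions): wrap each payload page so the buffer can be handed to bvec-based socket I/O, with only the first page starting at a non-zero offset.

#include <linux/bvec.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static unsigned int xdr_pages_to_bvecs(struct bio_vec *bvec,
				       struct page **pages,
				       unsigned int npages,
				       unsigned int len, unsigned int offset)
{
	unsigned int i;

	for (i = 0; i < npages && len; i++) {
		unsigned int seg = min_t(unsigned int, len,
					 PAGE_SIZE - offset);

		bvec_set_page(&bvec[i], pages[i], seg, offset);
		len -= seg;
		offset = 0;	/* later pages start page-aligned */
	}
	return i;
}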
/linux-master/fs/ceph/
addr.c
36 * count dirty pages on the inode. In the absence of snapshots,
41 * with dirty pages (dirty pages implies there is a cap) gets a new
51 * we look for the first capsnap in i_cap_snaps and write out pages in
53 * eventually reaching the "live" or "head" context (i.e., pages that
55 * pages.
253 osd_data->pages, subreq->start,
262 ceph_put_page_vector(osd_data->pages,
382 struct page **pages; local
385 err = iov_iter_get_pages_alloc2(&iter, &pages, le
1006 struct page **pages = NULL, **data_pages; local
1825 struct page *pages[1]; local
1980 struct page **pages; local
[all...]
/linux-master/drivers/misc/
vmw_balloon.c
8 * acts like a "balloon" that can be inflated to reclaim physical pages by
10 * freeing up the underlying machine pages so they can be allocated to
51 /* Maximum number of refused pages we accumulate during inflation cycle */
147 * ballooned pages (up to 512).
149 * pages that are about to be deflated from the
152 * for 2MB pages.
155 * pages.
240 struct list_head pages; member in struct:vmballoon_ctl
315 * @batch_max_pages: maximum pages that can be locked/unlocked.
317 * Indicates the number of pages tha
985 vmballoon_enqueue_page_list(struct vmballoon *b, struct list_head *pages, unsigned int *n_pages, enum vmballoon_page_size_type page_size) argument
1028 vmballoon_dequeue_page_list(struct vmballoon *b, struct list_head *pages, unsigned int *n_pages, enum vmballoon_page_size_type page_size, unsigned int n_req_pages) argument
[all...]
/linux-master/mm/
swap_state.c
67 printk("%lu pages in swap cache\n", total_swapcache_pages());
309 * Passed an array of pages, drop them all from swapcache and then release
312 void free_pages_and_swap_cache(struct encoded_page **pages, int nr) argument
320 struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
324 if (unlikely(encoded_page_flags(pages[i]) &
326 refs[folios.nr] = encoded_nr_pages(pages[++i]);
577 unsigned int pages, last_ra; local
584 pages = hits + 2;
585 if (pages == 2) {
592 pages
614 unsigned int hits, pages, max_pages; local
[all...]
/linux-master/fs/ntfs3/
file.c
565 /* Write out all dirty pages. */
815 * Return: Array of locked pages.
818 struct page **pages, u32 pages_per_frame,
832 page = pages[npages];
843 pages[npages] = page;
864 struct page *page, **pages = NULL; local
884 pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
885 if (!pages)
917 pages, pages_per_frame,
923 err = ni_read_frame(ni, frame_vbo, pages,
817 ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index, struct page **pages, u32 pages_per_frame, bool *frame_uptodate) argument
[all...]
/linux-master/drivers/firmware/efi/
capsule.c
119 * How many scatter gather list (block descriptor) pages do we need
120 * to map @count pages?
130 * @sg_pages: array of scatter gather (block descriptor) pages
186 * @pages: an array of capsule data pages
189 * map the capsule described by @capsule with its data in @pages and
203 * subsequent calls to efi_capsule_pending() will return true. @pages
216 int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages) argument
252 PAGE_SIZE - (u64)*pages % PAGE_SIZE);
255 sglist[j].data = *pages
[all...]
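
The capsule.c hits ask how many block-descriptor pages are needed to map @count data pages. A sketch of that arithmetic, under the assumption that each descriptor page reserves its last efi_capsule_block_desc_t entry as a continuation pointer chaining to the next descriptor page:

#include <linux/efi.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* usable descriptors per page: one slot is kept for chaining */
#define DESCS_PER_PAGE	((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)

static unsigned int sg_pages_needed(unsigned int data_pages)
{
	/* e.g. with 4 KiB pages and 16-byte descriptors, 255 data pages
	 * fit per descriptor page, so 256 data pages need 2 of them */
	return DIV_ROUND_UP(data_pages, DESCS_PER_PAGE);
}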
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_internal.c
121 struct sg_table *pages)
123 i915_gem_gtt_finish_pages(obj, pages);
124 internal_free_pages(pages);
162 * Mark the object as volatile, such that the pages are marked as
180 * i915_gem_object_create_internal: create an object with volatile pages
187 * shrinker, its pages and data will be discarded. Equally, it is not a full
120 i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, struct sg_table *pages) argument
i915_gem_shmem.c
80 * If there's no chance of allocating enough pages for the whole
90 * Get the list of pages out of our struct file. They'll be pinned
138 * kswapd to reclaim our pages (direct reclaim
144 * dirty pages -- unless you try over and over
251 "Failed to DMA remap %zu pages\n",
294 * backing pages, *now*.
298 obj->mm.pages = ERR_PTR(-EFAULT);
316 * leaving only CPU mmapings around) and add those pages to the LRU
368 struct sg_table *pages,
381 drm_clflush_sg(pages);
367 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages, bool needs_clflush) argument
396 i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages) argument
412 shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) argument
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_hmm.c
38 * all operations on the pages in question are completed, then those pages are
63 * Block for operations on BOs to finish and mark pages as accessed and
170 void *owner, struct page **pages,
234 * Due to default_flags, all pages are HMM_PFN_VALID or
235 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
238 for (i = 0; pages && i < npages; i++)
239 pages[i] = hmm_pfn_to_page(pfns[i]);
168 amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, struct page **pages, struct hmm_range **phmm_range) argument
amdgpu_gart.c
43 * in the GPU's address space. System pages can be mapped into
44 * the aperture and look like contiguous pages from the GPU's
45 * perspective. A page table maps the pages in the aperture
46 * to the actual backing pages in system memory.
71 * when pages are taken out of the GART
134 /* assign pages to this device */
139 * then set_memory_wc() could be used as a workaround to mark the pages
288 * amdgpu_gart_unbind - unbind pages from the gart page table
292 * @pages: number of pages to unbind
298 amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int pages) argument
349 amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags, void *dst) argument
385 amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags) argument
[all...]
/linux-master/arch/parisc/mm/
init.c
151 pmem_ranges[i-1].pages) > MAX_GAP) {
153 printk("Large gap in memory detected (%ld pages). "
157 pmem_ranges[i-1].pages));
171 size = (pmem_ranges[i].pages << PAGE_SHIFT);
201 rsize = pmem_ranges[i].pages << PAGE_SHIFT;
207 pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
226 end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
232 pmem_holes[npmem_holes++].pages = hole_pages;
235 end_pfn += pmem_ranges[i].pages;
238 pmem_ranges[0].pages
[all...]
/linux-master/include/linux/ceph/
libceph.h
177 * calculate the number of pages a given length and offset map onto,
316 extern void ceph_release_page_vector(struct page **pages, int num_pages);
317 extern void ceph_put_page_vector(struct page **pages, int num_pages,
320 extern int ceph_copy_user_to_page_vector(struct page **pages,
323 extern void ceph_copy_to_page_vector(struct page **pages,
326 extern void ceph_copy_from_page_vector(struct page **pages,
329 extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
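
The libceph.h comment at line 177 describes page-count arithmetic: the index one past the last touched page, minus the index of the first. A sketch of that formula with a worked example (the in-tree helper is calc_pages_for(); the name below is ours):

#include <linux/mm.h>

static inline int calc_pages_spanned(u64 off, u64 len)
{
	return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
	       (off >> PAGE_SHIFT);
}

/* e.g. off = 4000, len = 200 with 4 KiB pages touches pages 0 and 1,
 * and the formula yields 2 */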
/linux-master/drivers/gpu/drm/tests/
drm_gem_shmem_test.c
126 * Test pinning backing pages for a shmem GEM object. The test case
127 * succeeds if a suitable number of backing pages are allocated, and
128 * the pages table counter attribute is increased by one.
138 KUNIT_EXPECT_NULL(test, shmem->pages);
146 KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
150 KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
153 KUNIT_EXPECT_NULL(test, shmem->pages);
194 * Test exporting a scatter/gather table of pinned pages suitable for
233 * Test pinning pages and exporting a scatter/gather table suitable for
235 * backing pages ar
[all...]
/linux-master/drivers/mtd/nand/raw/
nand_bbt.c
270 res = read_bbt(this, buf, td->pages[i],
278 res = read_bbt(this, buf, td->pages[0],
308 * Scan read data from data+OOB. May traverse multiple pages, interleaving
396 scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,
400 td->pages[0], td->version[0]);
405 scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,
409 md->pages[0], md->version[0]);
584 td->pages[i] = -1;
598 td->pages[i] = actblock << blocktopage;
610 if (td->pages[
[all...]
/linux-master/drivers/gpu/drm/ttm/
ttm_agp_backend.c
67 struct page *page = ttm->pages[i];
72 mem->pages[mem->page_count++] = page;
/linux-master/include/linux/
kmsan.h
50 * kmsan_memblock_free_pages() - handle freeing of memblock pages.
54 * Freed pages are either returned to buddy allocator or held back to be used
55 * as metadata pages.
66 * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
81 * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
85 * KMSAN copies the contents of metadata pages for @src into the metadata pages
86 * for @dst. If @dst has no associated metadata pages, nothing happens.
87 * If @src has no associated metadata pages, @dst metadata pages ar
288 kmsan_vmap_pages_range_noflush( unsigned long start, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) argument
[all...]
/linux-master/kernel/trace/
tracing_map.h
170 void **pages; member in struct:tracing_map_array
174 (array->pages[idx >> array->entry_shift] + \
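
The tracing_map_array fragment above indexes into a set of fixed-size pages with a shift and a mask. A self-contained sketch of that chunked-array lookup (field names mirror the struct fragment; the mask/shift derivation is an assumption):

#include <linux/types.h>

struct chunked_array {
	unsigned int entry_shift;	/* log2(entries per page) */
	unsigned int entry_mask;	/* (entries per page) - 1 */
	unsigned int entry_size_shift;	/* log2(bytes per entry) */
	void **pages;
};

static void *chunked_array_elt(struct chunked_array *a, unsigned int idx)
{
	/* high bits pick the page, low bits the slot within it */
	return a->pages[idx >> a->entry_shift] +
	       ((idx & a->entry_mask) << a->entry_size_shift);
}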
/linux-master/arch/sparc/kernel/
pci_fire.c
234 unsigned long pages, order, i; local
237 pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
238 if (pages == 0UL) {
243 memset((char *)pages, 0, PAGE_SIZE << order);
244 pbm->msi_queues = (void *) pages;
266 unsigned long pages, order; local
269 pages = (unsigned long) pbm->msi_queues;
271 free_pages(pages, order);
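
The pci_fire.c hits allocate the MSI queues as one high-order, zeroed block and later free it with the same order. A reconstruction as a hedged sketch (the names are invented):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static void *alloc_queue_block(size_t bytes, unsigned long *orderp)
{
	unsigned long order = get_order(bytes);
	unsigned long pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);

	if (!pages)
		return NULL;
	memset((void *)pages, 0, PAGE_SIZE << order);
	*orderp = order;
	return (void *)pages;
}

static void free_queue_block(void *block, unsigned long order)
{
	/* free_pages() must be passed the same order used to allocate */
	free_pages((unsigned long)block, order);
}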

Completed in 203 milliseconds
