Searched refs:nr_pages (Results 201 - 225 of 390) sorted by relevance

/linux-master/drivers/hwtracing/coresight/
ultrasoc-smb.c
308 int nr_pages, bool overwrite)
319 buf->nr_pages = nr_pages;
361 pg_idx %= buf->nr_pages;
306 smb_alloc_buffer(struct coresight_device *csdev, struct perf_event *event, void **pages, int nr_pages, bool overwrite) argument
/linux-master/arch/arm64/mm/
pageattr.c
110 for (i = 0; i < area->nr_pages; i++) {
/linux-master/drivers/iommu/
s390-iommu.c
554 unsigned long nr_pages, int flags,
562 for (i = 0; i < nr_pages; i++) {
590 dma_addr_t dma_addr, unsigned long nr_pages)
596 for (i = 0; i < nr_pages; i++) {
552 s390_iommu_validate_trans(struct s390_domain *s390_domain, phys_addr_t pa, dma_addr_t dma_addr, unsigned long nr_pages, int flags, gfp_t gfp) argument
589 s390_iommu_invalidate_trans(struct s390_domain *s390_domain, dma_addr_t dma_addr, unsigned long nr_pages) argument
/linux-master/mm/
mempolicy.c
2348 struct mempolicy *pol, unsigned long nr_pages,
2359 nr_pages_per_node = nr_pages / nodes;
2360 delta = nr_pages - nodes * nr_pages_per_node;
2383 struct mempolicy *pol, unsigned long nr_pages,
2394 unsigned long rem_pages = nr_pages;
2402 if (!nr_pages)
2415 /* Continue allocating from most recent node and adjust the nr_pages */
2487 if (total_allocated == nr_pages)
2498 struct mempolicy *pol, unsigned long nr_pages,
2508 nr_pages, NUL
2347 alloc_pages_bulk_array_interleave(gfp_t gfp, struct mempolicy *pol, unsigned long nr_pages, struct page **page_array) argument
2382 alloc_pages_bulk_array_weighted_interleave(gfp_t gfp, struct mempolicy *pol, unsigned long nr_pages, struct page **page_array) argument
2497 alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, struct mempolicy *pol, unsigned long nr_pages, struct page **page_array) argument
2523 alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, unsigned long nr_pages, struct page **page_array) argument
[all...]
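
The mempolicy.c interleave helpers above divide nr_pages across the interleave nodes with plain integer arithmetic; the sketch below restates that split with illustrative numbers (it is not mempolicy.c's code, and the helper name is made up).

#include <linux/types.h>

/* How nr_pages = 10 spreads over nodes = 4: per_node = 2, delta = 2, so the
 * first two nodes visited get 3 pages and the last two get 2 (3+3+2+2 = 10). */
static void interleave_split_example(unsigned long nr_pages, unsigned long nodes,
                                     unsigned long *per_node, unsigned long *delta)
{
        *per_node = nr_pages / nodes;                 /* 10 / 4 == 2     */
        *delta = nr_pages - nodes * *per_node;        /* 10 - 4 * 2 == 2 */
}
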
vmalloc.c
3219 for (i = 0; i < area->nr_pages; i++)
3238 for (i = 0; i < area->nr_pages; i += 1U << page_order) {
3337 for (i = 0; i < vm->nr_pages; i++) {
3349 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
3434 area->nr_pages = count;
3497 unsigned int order, unsigned int nr_pages, struct page **pages)
3515 while (nr_allocated < nr_pages) {
3524 nr_pages_request = min(100U, nr_pages - nr_allocated);
3561 while (nr_allocated < nr_pages) {
3642 area->nr_pages
3496 vm_area_alloc_pages(gfp_t gfp, int nid, unsigned int order, unsigned int nr_pages, struct page **pages) argument
4797 unsigned int nr_pages; local
[all...]
/linux-master/drivers/fpga/
fpga-mgr.c
471 int nr_pages; local
487 nr_pages = DIV_ROUND_UP((unsigned long)buf + count, PAGE_SIZE) -
489 pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
494 for (index = 0; index < nr_pages; index++) {
/linux-master/fs/
aio.c
126 long nr_pages; member in struct:kioctx
260 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) argument
269 inode->i_size = PAGE_SIZE * nr_pages;
336 for (i = 0; i < ctx->nr_pages; i++) {
443 if (idx < (pgoff_t)ctx->nr_pages) {
496 int nr_pages; local
506 nr_pages = PFN_UP(size);
507 if (nr_pages < 0)
510 file = aio_private_file(ctx, nr_pages);
517 nr_events = (PAGE_SIZE * nr_pages
[all...]
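
The aio.c hits size the ring by rounding a byte count up to whole pages with PFN_UP(); a minimal illustration follows (values and the helper name are made up, 4 KiB pages assumed).

#include <linux/pfn.h>          /* PFN_UP(): round a byte count up to pages */

static int ring_pages_example(size_t size)
{
        return PFN_UP(size);    /* e.g. size = 5000 -> (5000 + 4095) >> 12 == 2 */
}
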
/linux-master/block/
bio-integrity.c
455 unsigned int len, nr_pages; local
491 nr_pages = end - start;
494 bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
509 for (i = 0; i < nr_pages && len > 0; i++) {
/linux-master/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
384 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
385 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
ehea.h
247 u32 nr_pages; member in struct:ehea_eq_attr
292 u32 nr_pages; member in struct:ehea_cq_attr
/linux-master/drivers/target/iscsi/cxgbit/
cxgbit_ddp.c
191 ttinfo->nr_pages = (xferlen + sgl->offset +
197 ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
/linux-master/arch/sh/kernel/cpu/sh4/
sq.c
374 unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT; local
384 sq_bitmap = bitmap_zalloc(nr_pages, GFP_KERNEL);
/linux-master/drivers/misc/genwqe/
card_base.h
172 unsigned int nr_pages; /* number of pages */ member in struct:dma_mapping
356 unsigned long nr_pages; member in struct:genwqe_sgl
/linux-master/crypto/
scompress.c
172 int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE); local
176 for (i = 0; i < nr_pages; i++)
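
In the scompress.c hit, DIV_ROUND_UP() counts how many destination pages the decompressed output spans; the sketch below shows the same calculation with assumed values (the function name is hypothetical, 4 KiB pages assumed).

#include <linux/math.h>         /* DIV_ROUND_UP() */
#include <linux/mm.h>           /* PAGE_SIZE      */

static int dst_pages_example(unsigned int offset, unsigned int dlen)
{
        /* e.g. offset = 512, dlen = 8000: DIV_ROUND_UP(8512, 4096) == 3 */
        return DIV_ROUND_UP(offset + dlen, PAGE_SIZE);
}
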
/linux-master/drivers/hwtracing/ptt/
hisi_ptt.c
1031 int nr_pages, bool overwrite)
1043 if (nr_pages < HISI_PTT_TRACE_TOTAL_BUF_SIZE / PAGE_SIZE)
1050 pagelist = kcalloc(nr_pages, sizeof(*pagelist), GFP_KERNEL);
1054 for (i = 0; i < nr_pages; i++)
1057 buf->base = vmap(pagelist, nr_pages, VM_MAP, PAGE_KERNEL);
1063 buf->nr_pages = nr_pages;
1064 buf->length = nr_pages * PAGE_SIZE;
1030 hisi_ptt_pmu_setup_aux(struct perf_event *event, void **pages, int nr_pages, bool overwrite) argument
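
The hisi_ptt.c setup above stitches the per-page AUX buffers handed in by perf into one contiguous kernel mapping with vmap(). Below is a minimal sketch of that pattern; the loop body is truncated in the snippet, so the virt_to_page() conversion and the helper name are assumptions.

#include <linux/mm.h>           /* virt_to_page() */
#include <linux/slab.h>         /* kcalloc(), kfree() */
#include <linux/vmalloc.h>      /* vmap(), VM_MAP */

static void *map_aux_pages_example(void **pages, int nr_pages)
{
        struct page **pagelist;
        void *base;
        int i;

        pagelist = kcalloc(nr_pages, sizeof(*pagelist), GFP_KERNEL);
        if (!pagelist)
                return NULL;

        for (i = 0; i < nr_pages; i++)
                pagelist[i] = virt_to_page(pages[i]);

        base = vmap(pagelist, nr_pages, VM_MAP, PAGE_KERNEL);
        kfree(pagelist);        /* the mapping's page tables hold the pages now */
        return base;
}

The mapping is torn down later with vunmap().
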
/linux-master/drivers/mtd/devices/
mtd_dataflash.c
621 static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages, argument
646 device->size = nr_pages * pagesize;
679 int nr_pages, int pagesize, int pageoffset)
681 return add_dataflash_otp(spi, name, nr_pages, pagesize,
694 unsigned nr_pages; member in struct:flash_info
862 return add_dataflash_otp(spi, info->name, info->nr_pages,
678 add_dataflash(struct spi_device *spi, char *name, int nr_pages, int pagesize, int pageoffset) argument
/linux-master/arch/arm64/include/asm/
kvm_host.h
88 unsigned long nr_pages; member in struct:kvm_hyp_memcache
97 mc->nr_pages++;
105 if (!mc->nr_pages)
109 mc->nr_pages--;
120 while (mc->nr_pages < min_pages) {
136 while (mc->nr_pages)
/linux-master/drivers/edac/
amd64_edac.c
3034 u32 cs_mode, nr_pages; local
3039 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
3040 nr_pages <<= 20 - PAGE_SHIFT;
3044 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3046 return nr_pages;
3052 u32 cs_mode, nr_pages; local
3056 nr_pages = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3057 nr_pages <<= 20 - PAGE_SHIFT;
3061 edac_dbg(0, "nr_pages/channe
3116 int nr_pages = 0; local
3566 u32 nr_pages; local
[all...]
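
The amd64_edac.c hits convert a chip-select size in MiB to a page count by shifting left by 20 - PAGE_SHIFT; with 4 KiB pages that is a shift by 8, i.e. 256 pages per MiB. A tiny illustration (the helper name is made up):

#include <linux/mm.h>           /* PAGE_SHIFT */
#include <linux/types.h>

static u32 mib_to_pages_example(u32 size_mib)
{
        return size_mib << (20 - PAGE_SHIFT);   /* e.g. 2048 MiB -> 524288 pages */
}
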
/linux-master/arch/s390/kernel/
perf_cpum_sf.c
1741 * @nr_pages: Total pages
1752 int nr_pages, bool snapshot)
1759 if (!nr_pages || !pages)
1762 if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
1765 nr_pages);
1767 } else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
1770 nr_pages);
1781 n_sdbt = DIV_ROUND_UP(nr_pages, CPUM_SF_SDB_PER_TABLE);
1787 aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL);
1803 for (i = 0; i < nr_pages;
1751 aux_buffer_setup(struct perf_event *event, void **pages, int nr_pages, bool snapshot) argument
[all...]
/linux-master/drivers/rapidio/devices/
rio_mport_cdev.c
544 unsigned int nr_pages; member in struct:mport_dma_req
582 unpin_user_pages(req->page_list, req->nr_pages);
813 unsigned long nr_pages = 0; local
856 nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
858 page_list = kmalloc_array(nr_pages,
867 nr_pages,
871 if (pinned != nr_pages) {
875 nr_pages = 0;
878 pinned, nr_pages);
880 * Set nr_pages u
[all...]
/linux-master/include/linux/
mm.h
2502 unsigned long start, unsigned long nr_pages,
2506 unsigned long start, unsigned long nr_pages,
2540 long get_user_pages(unsigned long start, unsigned long nr_pages,
2542 long pin_user_pages(unsigned long start, unsigned long nr_pages,
2544 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2546 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2549 int get_user_pages_fast(unsigned long start, int nr_pages,
2551 int pin_user_pages_fast(unsigned long start, int nr_pages,
2620 int get_user_pages_fast_only(unsigned long start, int nr_pages,
3915 unsigned long nr_pages, in
3977 unsigned long nr_pages; local
[all...]
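
The mm.h hits are the declarations of the get_user_pages()/pin_user_pages() family, where nr_pages is the number of pages requested and the return value is the number actually pinned. A minimal usage sketch follows, assuming the pin_user_pages_fast() and unpin_user_pages() declarations above; the helper itself is hypothetical and error handling is kept short.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

static long pin_user_buf_example(unsigned long uaddr, size_t len,
                                 struct page ***pagesp)
{
        unsigned long first, last;
        struct page **pages;
        int nr_pages, pinned;

        if (!len)
                return -EINVAL;

        first = uaddr >> PAGE_SHIFT;
        last = (uaddr + len - 1) >> PAGE_SHIFT;
        nr_pages = last - first + 1;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
        if (pinned != nr_pages) {
                /* partial pin or error: drop whatever was pinned */
                if (pinned > 0)
                        unpin_user_pages(pages, pinned);
                kfree(pages);
                return pinned < 0 ? pinned : -EFAULT;
        }

        *pagesp = pages;
        return nr_pages;
}

Pages pinned this way are released with unpin_user_pages() (or the _dirty_lock variant) once the I/O completes.
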
/linux-master/drivers/xen/
gntdev.c
104 args.nr_pages = map->count;
183 args.nr_pages = count;
795 unsigned int nr_pages; member in struct:gntdev_copy_batch
811 batch->pages[batch->nr_pages++] = page;
821 unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
822 batch->nr_pages = 0;
964 batch.nr_pages = 0;
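
The gntdev.c hits show a batch-and-flush pattern: pinned pages are collected into batch->pages and released in one unpin_user_pages_dirty_lock() call, after which nr_pages is reset. A minimal sketch of that shape (struct layout, batch size, and helper names are illustrative, not gntdev's own):

#include <linux/mm.h>
#include <linux/types.h>

#define COPY_BATCH_MAX 16

struct copy_batch_example {
        struct page *pages[COPY_BATCH_MAX];
        unsigned int nr_pages;
        bool writeable;
};

static void copy_batch_flush(struct copy_batch_example *batch)
{
        unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages,
                                    batch->writeable);
        batch->nr_pages = 0;
}

static void copy_batch_add(struct copy_batch_example *batch, struct page *page)
{
        if (batch->nr_pages == COPY_BATCH_MAX)
                copy_batch_flush(batch);
        batch->pages[batch->nr_pages++] = page;
}
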
/linux-master/fs/fuse/
dax.c
1231 long nr_pages, nr_ranges; local
1243 nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
1246 if (nr_pages < 0) {
1247 pr_debug("dax_direct_access() returned %ld\n", nr_pages);
1248 return nr_pages;
1251 nr_ranges = nr_pages/FUSE_DAX_PAGES;
1253 __func__, nr_pages, nr_ranges);
/linux-master/arch/arm64/kvm/hyp/
pgtable.c
1495 int nr_pages; local
1506 nr_pages = stage2_block_get_nr_page_tables(level);
1507 if (nr_pages < 0)
1508 return nr_pages;
1510 if (mc->nobjs >= nr_pages) {
1523 nr_pages = 1;
1526 if (mc->nobjs < nr_pages)
/linux-master/fs/f2fs/
data.c
1040 unsigned nr_pages, blk_opf_t op_flag,
1050 bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
2071 unsigned nr_pages,
2087 last_block = block_in_file + nr_pages;
2150 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2180 unsigned nr_pages, sector_t *last_block_in_bio,
2303 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2371 unsigned nr_pages = rac ? readahead_count(rac) : 1; local
2372 unsigned max_nr_pages = nr_pages;
2385 for (; nr_pages; nr_page
1039 f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, unsigned nr_pages, blk_opf_t op_flag, pgoff_t first_idx, bool for_write) argument
2070 f2fs_read_single_page(struct inode *inode, struct folio *folio, unsigned nr_pages, struct f2fs_map_blocks *map, struct bio **bio_ret, sector_t *last_block_in_bio, bool is_readahead) argument
2179 f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, unsigned nr_pages, sector_t *last_block_in_bio, bool is_readahead, bool for_write) argument
2986 int nr_pages; local
[all...]
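
The f2fs data.c hits size a read bio by the readahead page count, clamped with bio_max_segs() to BIO_MAX_VECS. The sketch below shows that sizing with the generic bio_alloc() standing in for f2fs's private bio_set; the helper name and gfp choice are assumptions.

#include <linux/bio.h>

static struct bio *grab_read_bio_example(struct block_device *bdev,
                                         unsigned int nr_pages)
{
        /* bio_max_segs() caps the vector count at BIO_MAX_VECS (256) */
        return bio_alloc(bdev, bio_max_segs(nr_pages), REQ_OP_READ, GFP_NOIO);
}
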

Completed in 317 milliseconds
