Searched refs:page (Results 176 - 200 of 3148) sorted by last modified time


/linux-master/drivers/infiniband/hw/mlx5/
restrack.c 21 struct page *page; local
27 page = alloc_page(GFP_KERNEL);
28 if (!page)
43 cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
48 memcpy(data + offset, page_address(page), size);
56 __free_page(page);
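
The restrack.c hits above follow a common scratch-page pattern: allocate one page, let the firmware dump into it, copy the payload out, and free the page. Below is a minimal, hypothetical sketch of that pattern only; the mlx5_rsc_dump_next() call and the surrounding driver state are replaced by a generic callback, and all names here are illustrative, not the driver's.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Illustrative only: copy up to 'len' bytes produced into a scratch page. */
static int fill_from_scratch_page(void *dst, size_t len,
				  int (*produce)(void *buf, size_t *size))
{
	struct page *page;
	size_t size = PAGE_SIZE;
	int err;

	page = alloc_page(GFP_KERNEL);		/* one scratch page */
	if (!page)
		return -ENOMEM;

	err = produce(page_address(page), &size); /* producer fills the page */
	if (!err)
		memcpy(dst, page_address(page), min(size, len));

	__free_page(page);			/* always release the scratch page */
	return err;
}
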
mr.c 149 struct mlx5_mkeys_page *page; local
154 page = kzalloc(sizeof(*page), GFP_ATOMIC);
155 if (!page)
158 list_add_tail(&page->list, &ent->mkeys_queue.pages_list);
160 page = list_last_entry(&ent->mkeys_queue.pages_list,
164 page->mkeys[tmp] = mkey;
831 struct mlx5_mkeys_page *page; local
833 page = kzalloc(sizeof(*page), GFP_KERNE
845 struct mlx5_mkeys_page *page; local
[all...]
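
The mr.c hits show mkeys stored in page-sized chunks chained on a list: when the current chunk is full, a new one is allocated and appended with list_add_tail(), and the key is written into the last entry. A simplified, hedged sketch of that grow-by-a-chunk queue; the struct layout, field names, and capacity constant are assumptions, not the mlx5 definitions.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

#define KEYS_PER_PAGE 128			/* assumed capacity per chunk */

struct key_page {				/* hypothetical chunk */
	struct list_head list;
	u32 keys[KEYS_PER_PAGE];
};

struct key_queue {
	struct list_head pages_list;		/* INIT_LIST_HEAD() by the caller */
	unsigned long num_keys;
};

static int key_queue_push(struct key_queue *q, u32 key)
{
	struct key_page *page;
	unsigned int slot = q->num_keys % KEYS_PER_PAGE;

	if (slot == 0) {			/* current chunk full: grow by one */
		page = kzalloc(sizeof(*page), GFP_ATOMIC);
		if (!page)
			return -ENOMEM;
		list_add_tail(&page->list, &q->pages_list);
	}
	page = list_last_entry(&q->pages_list, struct key_page, list);
	page->keys[slot] = key;
	q->num_keys++;
	return 0;
}
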
/linux-master/drivers/infiniband/hw/hns/
hns_roce_device.h 186 /* The minimum page size is 4K for hardware */
261 u32 offset; /* page offset */
262 u32 count; /* page count */
287 unsigned int page_shift; /* buffer page shift */
298 unsigned int ba_pg_shift; /* BA table page shift */
299 unsigned int buf_pg_shift; /* buffer page shift */
300 unsigned int buf_pg_count; /* buffer page count */
396 u32 *page; member in struct:hns_roce_db_pgdir
/linux-master/drivers/infiniband/hw/erdma/
erdma_verbs.c 627 struct page *pg;
629 /* Failed if buf is not page aligned */
821 struct erdma_user_dbrecords_page *page = NULL; local
826 list_for_each_entry(page, &ctx->dbrecords_page_list, list)
827 if (page->va == (dbrecords_va & PAGE_MASK))
830 page = kmalloc(sizeof(*page), GFP_KERNEL);
831 if (!page) {
836 page->va = (dbrecords_va & PAGE_MASK);
837 page
[all...]
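
The erdma_verbs.c hits implement a small per-context cache of doorbell-record pages keyed by page-aligned user VA: walk the list for an existing entry, otherwise allocate and insert a new one. A hedged sketch of that lookup-or-create step follows; the pinning of the user page itself is elided, and the struct and field names are illustrative.

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

struct dbrecords_page {				/* hypothetical cache entry */
	struct list_head list;
	u64 va;					/* page-aligned user address */
	u32 refcnt;
};

static struct dbrecords_page *
dbrecords_get(struct list_head *cache, u64 user_va)
{
	struct dbrecords_page *page;

	list_for_each_entry(page, cache, list)
		if (page->va == (user_va & PAGE_MASK)) {
			page->refcnt++;		/* reuse the cached page */
			return page;
		}

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	page->va = user_va & PAGE_MASK;
	page->refcnt = 1;
	list_add(&page->list, cache);		/* cache for later lookups */
	return page;
}
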
/linux-master/drivers/infiniband/hw/efa/
efa_verbs.c 1263 struct page *pg;
1477 /* create a page buffer list from a mapped user memory region */
1640 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
/linux-master/drivers/hwtracing/coresight/
coresight-trbe.c 377 * write to the next "virtually addressed" page beyond the LIMIT.
380 * page than normal. With this we could then adjust the LIMIT
501 * page boundary following it. Keep the tail boundary if that's lower.
690 * keeps a valid page next to the LIMIT and we could potentially
733 struct page **pglist;
737 * TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with
738 * just a single page, there would not be any room left while writing
739 * into a partially filled TRBE buffer after the page size alignment.
934 * - Set the TRBBASER to the page aligned offset of the current
953 * the next page afte
[all...]
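
The coresight-trbe.c comments describe a hard constraint: the TRBE LIMIT and WRITE pointers must be page aligned, so a single-page buffer leaves no usable room once the pointers are rounded up. A minimal sanity-check sketch of that rule; this is illustrative only, and the real driver derives these values from the perf AUX buffer rather than raw base/size arguments.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static int trbe_like_buf_check(unsigned long base, unsigned long size)
{
	/* Base and limit must be page aligned for the hardware. */
	if (!IS_ALIGNED(base, PAGE_SIZE) || !IS_ALIGNED(size, PAGE_SIZE))
		return -EINVAL;

	/*
	 * With a single page there is no room left once the write
	 * pointer is rounded up to the next page boundary, so demand
	 * at least two pages.
	 */
	if (size < 2 * PAGE_SIZE)
		return -EINVAL;

	return 0;
}
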
/linux-master/drivers/gpu/drm/xen/
xen_drm_front_gem.c 30 struct page **pages;
50 sizeof(struct page *), GFP_KERNEL);
225 struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
/linux-master/drivers/gpu/drm/omapdrm/
omap_gem.c 88 struct page **pages;
103 * they are not necessarily page aligned, we reserve one or more small
105 * can create a second page-aligned mapping of parts of the buffer
117 pgoff_t obj_pgoff; /* page offset of obj currently
232 struct page **pages;
268 "%s: failed to map page\n", __func__);
386 struct page *pages[64]; /* XXX is this too much to have on stack? */
395 * that need to be mapped in to fill 4kb wide CPU page. If the slot
447 * pages[i] to NULL to get a dummy page mapped in.. if someone
449 * least it won't be corrupting whatever other random page use
[all...]
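
The omap_gem.c hits collect up to 64 struct page pointers on the stack and substitute a dummy page for slots with no backing page, so a stray access hits a known scratch page instead of unrelated memory. A hedged sketch of just that substitution loop; the DMM/usergart programming is omitted and the names and slot width are assumptions.

#include <linux/mm.h>

#define SLOT_PAGES 64				/* assumed slot width */

/* Fill 'out' with real pages where available, the dummy page elsewhere. */
static void fill_slot_pages(struct page **out, struct page **src,
			    unsigned int nr, struct page *dummy)
{
	unsigned int i;

	for (i = 0; i < SLOT_PAGES; i++) {
		/* Past the object's end: map the dummy page instead of NULL. */
		out[i] = (i < nr && src[i]) ? src[i] : dummy;
	}
}
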
/linux-master/drivers/gpu/drm/i915/gvt/
gtt.c 163 * - type of next level page table
164 * - type of entry inside this level page table
169 * give a PTE page table type, then request to get its next level page
171 * and a PTE page table doesn't have a next level page table type,
173 * page table.
647 * PPGTT shadow page table helpers.
722 spt->shadow_page.page = alloc_page(gfp_mask);
723 if (!spt->shadow_page.page) {
2691 void *page; local
[all...]
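
The gtt.c comments describe a type system for shadow page tables: each table type maps to the type of the next-level table, and asking for the level below a PTE table signals that the walk has reached the bottom. A toy sketch of that mapping; the enum values are hypothetical and are not the GVT-g definitions.

/* Hypothetical page-table level types, top to bottom. */
enum ppgtt_type {
	PT_PML4,
	PT_PDP,
	PT_PD,
	PT_PTE,
	PT_INVALID,
};

/* Next-level table type for an entry in a table of type 't'. */
static enum ppgtt_type next_pt_type(enum ppgtt_type t)
{
	switch (t) {
	case PT_PML4:	return PT_PDP;
	case PT_PDP:	return PT_PD;
	case PT_PD:	return PT_PTE;
	case PT_PTE:	return PT_INVALID;	/* bottom of the page table */
	default:	return PT_INVALID;
	}
}
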
/linux-master/drivers/gpu/drm/i915/gt/
shmem_utils.c 60 struct page **pages;
103 struct page *page; local
106 page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
108 if (IS_ERR(page))
109 return PTR_ERR(page);
111 vaddr = kmap(page);
114 set_page_dirty(page);
118 mark_page_accessed(page);
119 kunmap(page);
138 struct page *page; local
[all...]
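
The shmem_utils.c hits show the classic write loop over a shmem file: look up each page with shmem_read_mapping_page_gfp(), map it with kmap(), copy the data, mark it dirty and accessed, then unmap and drop the reference. A condensed sketch of that loop under those assumptions; error handling is trimmed and this is an illustration of the pattern, not the i915 helper itself.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/string.h>

static int shmem_write_sketch(struct file *file, const char *src, size_t len)
{
	pgoff_t pfn = 0;

	while (len) {
		size_t n = min_t(size_t, len, PAGE_SIZE);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);		/* temporary kernel mapping */
		memcpy(vaddr, src, n);
		set_page_dirty(page);		/* needs writeback */
		mark_page_accessed(page);	/* keep it warm on the LRU */
		kunmap(page);
		put_page(page);			/* drop the lookup reference */

		src += n;
		len -= n;
		pfn++;
	}
	return 0;
}
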
/linux-master/drivers/gpu/drm/i915/gem/selftests/
mock_dmabuf.c 106 mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_pages.c 52 * Calculate the supported page-sizes which fit into the given
53 * sg_page_sizes. This will give us the page-sizes which we may be able
275 struct page *stack[32], **pages = stack, *page; local
293 * However, if we make an explicit vmap of the page, that
299 * So if the page is beyond the 32b boundary, make an explicit
319 for_each_sgt_page(page, iter, obj->mm.pages)
320 pages[i++] = page;
537 * sequential page access (where each new request is consecutive
611 /* If this index is in the middle of multi-page s
648 struct page *page; local
[all...]
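
The i915_gem_pages.c hits gather every page of an object into a flat array (a small stack array first, a heap allocation once there are more than 32 pages) and hand that array to vmap(). A hedged sketch of that gather-then-vmap step using a plain page array in place of the driver's sg-table iterator; names and the missing cache/coherency handling are assumptions.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *map_pages_sketch(struct page **src, unsigned int n_pages)
{
	struct page *stack[32], **pages = stack;
	unsigned int i;
	void *vaddr;

	/* Fall back to a heap array when the stack array is too small. */
	if (n_pages > ARRAY_SIZE(stack)) {
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	for (i = 0; i < n_pages; i++)
		pages[i] = src[i];

	vaddr = vmap(pages, n_pages, 0, PAGE_KERNEL); /* contiguous kernel view */

	if (pages != stack)
		kvfree(pages);
	return vaddr;
}
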
/linux-master/drivers/gpu/drm/gma500/
mmu.c 38 * If it fails, the caller need to insert the page using a workqueue function,
667 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
/linux-master/drivers/dax/
device.c 92 struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i); local
94 page = compound_head(page);
95 if (page->mapping)
98 page->mapping = filp->f_mapping;
99 page->index = pgoff + i;
414 "dynamic-dax with pre-populated page map\n");
/linux-master/drivers/block/zram/
zram_drv.c 57 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
163 * further an already recompressed page
206 unsigned long *page; local
208 unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
210 page = (unsigned long *)ptr;
211 val = page[0];
213 if (val != page[last_pos])
217 if (val != page[pos])
570 static void read_from_bdev_async(struct zram *zram, struct page *pag
585 read_from_bdev_async(struct zram *zram, struct page *page, unsigned long entry, struct bio *parent) argument
612 struct page *page; local
778 struct page *page; member in struct:zram_work
799 read_from_bdev_sync(struct zram *zram, struct page *page, unsigned long entry) argument
816 read_from_bdev(struct zram *zram, struct page *page, unsigned long entry, struct bio *parent) argument
830 read_from_bdev(struct zram *zram, struct page *page, unsigned long entry, struct bio *parent) argument
1308 zram_read_from_zspool(struct zram *zram, struct page *page, u32 index) argument
1353 zram_read_page(struct zram *zram, struct page *page, u32 index, struct bio *parent) argument
1388 struct page *page = alloc_page(GFP_NOIO); local
1408 zram_write_page(struct zram *zram, struct page *page, u32 index) argument
1539 struct page *page = alloc_page(GFP_NOIO); local
1570 zram_recompress(struct zram *zram, u32 index, struct page *page, u64 *num_recomp_pages, u32 threshold, u32 prio, u32 prio_max) argument
1726 struct page *page; local
[all...]
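
The zram_drv.c hits around line 206 detect "same filled" pages by reading the page as an array of unsigned long, checking the first word against the last, then against every word in between; such pages are stored as a single value instead of being compressed. A standalone sketch of that check, mirroring the logic shown in the hits above (the function name is illustrative):

#include <linux/mm.h>
#include <linux/types.h>

/* Return true (and the fill value) if every word in the page is identical. */
static bool page_same_filled_sketch(void *ptr, unsigned long *element)
{
	unsigned long *page = ptr;
	unsigned long val = page[0];
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	if (val != page[last_pos])		/* cheap early reject */
		return false;

	for (pos = 1; pos < last_pos; pos++)
		if (val != page[pos])
			return false;

	*element = val;
	return true;
}

Checking the last word first rejects most non-uniform pages after two reads instead of scanning the whole page.
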
/linux-master/drivers/accel/ivpu/
ivpu_mmu_context.c 49 struct page *page; local
52 page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
53 if (!page)
56 set_pages_array_wc(&page, 1);
58 dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
62 cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
74 put_page(page);
80 struct page *page; local
[all...]
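
The ivpu_mmu_context.c hits allocate a zeroed highmem page for a page-table level, switch it to write-combined caching, map it for DMA, and create a write-combined CPU view with vmap(); put_page() runs on the error path. A hedged sketch of that sequence; it is x86-only because of set_pages_array_wc(), the error handling is compressed, and the function name is illustrative.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <asm/set_memory.h>

static void *alloc_wc_table_sketch(struct device *dev, dma_addr_t *dma)
{
	struct page *page;
	void *cpu;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page)
		return NULL;

	set_pages_array_wc(&page, 1);		/* CPU mapping: write-combined */

	*dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma))
		goto err_put;

	cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	if (!cpu)
		goto err_unmap;

	return cpu;

err_unmap:
	dma_unmap_page(dev, *dma, PAGE_SIZE, DMA_BIDIRECTIONAL);
err_put:
	set_pages_array_wb(&page, 1);		/* restore default caching before freeing */
	put_page(page);
	return NULL;
}
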
/linux-master/crypto/
af_alg.c 718 struct page *page = sg_page(sg + i); local
720 if (!page)
729 /* discard page before offset */
732 /* reassign page to dst after offset */
733 get_page(page);
734 sg_set_page(dst + j, page,
751 put_page(page);
998 /* use the existing memory in an allocated page */
1028 /* allocate a new page */
[all...]
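
The af_alg.c hits re-point a destination scatterlist at the tail of an existing one: pages before the offset are dropped, and the page that straddles the offset is re-added with get_page() so both lists hold a reference; a later put_page() balances it. A reduced sketch of re-referencing a single sg entry at an offset, assuming the offset stays within that entry; this is not the full af_alg splice logic.

#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Point 'dst' at the part of 'src' that starts 'offset' bytes in,
 * taking an extra page reference so src can be released independently.
 */
static void sg_clone_tail_sketch(struct scatterlist *dst,
				 struct scatterlist *src,
				 unsigned int offset)
{
	struct page *page = sg_page(src);

	get_page(page);				/* dst now owns a reference too */
	sg_set_page(dst, page, src->length - offset, src->offset + offset);
}

/* The matching release once dst is no longer needed. */
static void sg_clone_put_sketch(struct scatterlist *dst)
{
	put_page(sg_page(dst));
}
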
/linux-master/arch/xtensa/mm/
tlb.c 127 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) argument
143 invalidate_itlb_mapping(page);
144 invalidate_dtlb_mapping(page);
227 * marked as non-present. Non-present PTE and the page with non-zero refcount
229 * means that the page was freed prematurely. Non-zero mapcount is unusual,
258 struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
cache.c 33 #include <asm/page.h>
37 * The kernel provides one architecture bit PG_arch_1 in the page flags that
44 * are coherent. The kernel clears this bit whenever a page is added to the
45 * page cache. At that time, the caches might not be in sync. We, therefore,
52 * page.
59 static inline void kmap_invalidate_coherent(struct page *page, argument
62 if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
65 if (!PageHighMem(page)) {
66 kvaddr = (unsigned long)page_to_virt(page);
81 coherent_kvaddr(struct page *page, unsigned long base, unsigned long vaddr, unsigned long *paddr) argument
88 clear_user_highpage(struct page *page, unsigned long vaddr) argument
273 copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) argument
313 copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) argument
[all...]
/linux-master/arch/x86/um/vdso/
Makefile 26 -Wl,-z,max-page-size=4096
/linux-master/arch/x86/mm/
pgtable.c 53 void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
66 * NOTE! For PAE, any changes to the top page-directory-pointer-table
121 struct mm_struct *pgd_page_get_mm(struct page *page) argument
123 return page_ptdesc(page)->pt_mm;
183 * We allocate separate PMDs for the kernel part of the user page-table
185 * user-space page-table.
361 * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
362 * assumes that pgd should be in one page.
365 * only needs to allocate 32 bytes for pgd instead of one page
[all...]
init.c 16 #include <asm/page.h>
169 * By default need to be able to allocate page tables below PGD firstly for
216 * enable and PPro Global page enable), so that any CPU's that boot
342 * big page size instead small one if nearby are ram too.
384 * 32-bit without PAE has a 4M large page size.
409 /* head if not big page alignment ? */
413 * Don't use a large page for the first 2/4MB of memory
432 /* big page (2M) range */
449 /* big page (1G) range */
459 /* tail is not big page (
[all...]
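
The init.c comments describe splitting a physical range into a small-page head (up to the first big-page boundary), a big-page middle, and a small-page tail, so each part is mapped with the largest page size its alignment allows. A hedged arithmetic sketch of that split, assuming 2M (PMD-sized) big pages and omitting the 1G step; the struct and names are hypothetical.

#include <linux/kernel.h>
#include <linux/sizes.h>

#define BIG_PAGE_SIZE SZ_2M			/* PMD-sized mapping */

struct range_split {				/* hypothetical result */
	unsigned long head_start, head_end;	/* 4K pages */
	unsigned long big_start, big_end;	/* 2M pages */
	unsigned long tail_start, tail_end;	/* 4K pages */
};

static void split_for_big_pages(unsigned long start, unsigned long end,
				struct range_split *out)
{
	unsigned long big_start = ALIGN(start, BIG_PAGE_SIZE);
	unsigned long big_end = end & ~(BIG_PAGE_SIZE - 1);

	if (big_start >= big_end) {		/* too small for any big page */
		out->head_start = start;
		out->head_end = end;
		out->big_start = out->big_end = end;
		out->tail_start = out->tail_end = end;
		return;
	}

	out->head_start = start;		/* head, if start is not 2M aligned */
	out->head_end = big_start;
	out->big_start = big_start;		/* 2M-mapped middle */
	out->big_end = big_end;
	out->tail_start = big_end;		/* tail, if end is not 2M aligned */
	out->tail_end = end;
}
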
fault.c 136 * If it was a exec (instruction fetch) fault on NX page, then
147 * not-present page (e.g. due to a race). No one has ever
225 * where it synchronizes this update with the other page-tables in the
231 * which are not mapped in every page-table in the system, causing an
232 * unhandled page-fault when they are accessed.
245 * Synchronize this task's top level page-table
246 * with the 'reference' page table.
274 struct page *page; local
277 list_for_each_entry(page,
[all...]
/linux-master/arch/x86/mm/pat/
memtype.c 3 * Page Attribute Table (PAT) support: handle memory caching attributes in page tables.
24 * PAT doesn't work via explicit memory ranges, but uses page table entries to add
54 #include <asm/page.h>
107 * X86 PAT uses page flags arch_1 and uncached together to keep track of
108 * memory type of pages that have backing page struct.
126 static inline enum page_cache_mode get_page_memtype(struct page *pg)
140 static inline void set_page_memtype(struct page *pg,
169 static inline enum page_cache_mode get_page_memtype(struct page *pg)
173 static inline void set_page_memtype(struct page *pg,
441 * For RAM pages, we use page flag
455 struct page *page; local
497 struct page *page; local
679 struct page *page; local
[all...]
/linux-master/arch/x86/kernel/
irq_64.c 39 struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];

Completed in 470 milliseconds
