Searched refs:page_list (Results 1 - 25 of 76) sorted by relevance


/linux-master/drivers/infiniband/hw/mthca/
mthca_allocator.c
117 if (array->page_list[p].page)
118 return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
128 if (!array->page_list[p].page)
129 array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
131 if (!array->page_list[p].page)
134 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
135 ++array->page_list[p].used;
144 if (--array->page_list[p].used == 0) {
145 free_page((unsigned long) array->page_list[p].page);
146 array->page_list[
[all...]
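
The mthca_allocator.c matches above implement a sparse two-level array: page_list[p] lazily holds one zeroed page of pointers, the low bits (index & MTHCA_ARRAY_MASK) pick a slot within it, and a per-page use count lets an emptied page be freed again. A minimal userspace sketch of the same idea, with calloc/free standing in for get_zeroed_page/free_page (all names below are hypothetical, not the mthca API):

#include <stdlib.h>

#define SLOTS_PER_PAGE 512                 /* stand-in for PAGE_SIZE / sizeof(void *) */
#define ARRAY_MASK     (SLOTS_PER_PAGE - 1)

struct sparse_array {
	struct {
		void **page;               /* lazily allocated page of slots */
		int used;                  /* live entries in this page */
	} page_list[64];
};

static void *sparse_get(struct sparse_array *a, int index)
{
	int p = index / SLOTS_PER_PAGE;

	return a->page_list[p].page ?
	       a->page_list[p].page[index & ARRAY_MASK] : NULL;
}

static int sparse_set(struct sparse_array *a, int index, void *value)
{
	int p = index / SLOTS_PER_PAGE;

	if (!a->page_list[p].page)         /* allocate the slot page on first use */
		a->page_list[p].page = calloc(SLOTS_PER_PAGE, sizeof(void *));
	if (!a->page_list[p].page)
		return -1;
	a->page_list[p].page[index & ARRAY_MASK] = value;
	++a->page_list[p].used;
	return 0;
}

static void sparse_clear(struct sparse_array *a, int index)
{
	int p = index / SLOTS_PER_PAGE;

	a->page_list[p].page[index & ARRAY_MASK] = NULL;
	if (--a->page_list[p].used == 0) { /* drop the slot page once empty */
		free(a->page_list[p].page);
		a->page_list[p].page = NULL;
	}
}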
mthca_eq.c
231 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
482 eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
484 if (!eq->page_list)
488 eq->page_list[i].buf = NULL;
500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
502 if (!eq->page_list[i].buf)
506 dma_unmap_addr_set(&eq->page_list[i], mapping, t);
508 clear_page(eq->page_list[i].buf);
572 if (eq->page_list[
[all...]
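
mthca_eq.c shows the other common shape: page_list is a flat array of per-page DMA buffers, and a linear offset is resolved with off / PAGE_SIZE to pick the page and off % PAGE_SIZE within it (line 231 above). A sketch of that lookup plus the allocate-or-unwind loop, with malloc/memset standing in for dma_alloc_coherent/clear_page (names hypothetical):

#include <stdlib.h>
#include <string.h>

#define BUF_PAGE_SIZE 4096                 /* stand-in for PAGE_SIZE */

struct buf_list { void *buf; };

/* Resolve a linear offset into the right page, as mthca_eq.c:231 does. */
static void *get_entry(struct buf_list *page_list, unsigned long off)
{
	return (char *)page_list[off / BUF_PAGE_SIZE].buf + off % BUF_PAGE_SIZE;
}

static struct buf_list *alloc_paged_buf(int npages)
{
	struct buf_list *pl = calloc(npages, sizeof(*pl));

	if (!pl)
		return NULL;
	for (int i = 0; i < npages; i++) {
		pl[i].buf = malloc(BUF_PAGE_SIZE);   /* dma_alloc_coherent() in the driver */
		if (!pl[i].buf) {
			while (i--)                  /* unwind on partial failure */
				free(pl[i].buf);
			free(pl);
			return NULL;
		}
		memset(pl[i].buf, 0, BUF_PAGE_SIZE); /* clear_page() in the driver */
	}
	return pl;
}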
mthca_provider.h
54 struct mthca_buf_list *page_list; member in union:mthca_buf
96 struct mthca_buf_list *page_list; member in struct:mthca_eq
/linux-master/arch/powerpc/kexec/
core_32.c
32 unsigned long page_list; local
43 page_list = image->head;
59 relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
63 (*rnk)(page_list, reboot_code_buffer_phys, image->start);
/linux-master/include/linux/
dm-io.h
25 struct page_list { struct
26 struct page_list *next;
45 struct page_list *pl;
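
dm-io's page_list is a singly linked chain of whole pages rather than an array; consumers walk pl = pl->next. A small sketch of building and tearing down such a chain, assuming malloc stands in for alloc_page (helper names hypothetical):

#include <stdlib.h>

struct page_list {
	struct page_list *next;
	void *page;                        /* struct page * in the kernel */
};

static struct page_list *pl_alloc_chain(unsigned int nr)
{
	struct page_list *head = NULL;

	while (nr--) {
		struct page_list *pl = malloc(sizeof(*pl));

		if (!pl)
			break;             /* real code would unwind here */
		pl->page = malloc(4096);
		pl->next = head;           /* push onto the chain */
		head = pl;
	}
	return head;
}

static void pl_free_chain(struct page_list *pl)
{
	while (pl) {
		struct page_list *next = pl->next;

		free(pl->page);
		free(pl);
		pl = next;
	}
}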
/linux-master/drivers/xen/
unpopulated-alloc.c
16 static struct page *page_list; variable in typeref:struct:page
131 pg->zone_device_data = page_list;
132 page_list = pg;
179 struct page *pg = page_list;
182 page_list = pg->zone_device_data;
193 pages[j]->zone_device_data = page_list;
194 page_list = pages[j];
225 pages[i]->zone_device_data = page_list;
226 page_list = pages[i];
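
In unpopulated-alloc.c, page_list is the head of a LIFO free list whose links live inside the pages themselves: the otherwise unused zone_device_data field points to the next free page, so push and pop need no extra allocation. A sketch of that pattern (the struct below is a hypothetical stand-in for struct page):

#include <stddef.h>

struct page {
	void *zone_device_data;            /* doubles as the free-list link */
};

static struct page *page_list;             /* free-list head, as at line 16 above */

static void push_page(struct page *pg)
{
	pg->zone_device_data = page_list;
	page_list = pg;
}

static struct page *pop_page(void)
{
	struct page *pg = page_list;

	if (pg)
		page_list = pg->zone_device_data;
	return pg;
}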
/linux-master/drivers/infiniband/hw/usnic/
usnic_uiom.c
50 ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
51 ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
52 (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
74 for_each_sg(chunk->page_list, sg, chunk->nents, i) {
89 struct page **page_list; local
117 page_list = (struct page **) __get_free_page(GFP_KERNEL);
118 if (!page_list)
143 gup_flags, page_list);
152 chunk = kmalloc(struct_size(chunk, page_list,
161 sg_init_table(chunk->page_list, chun
[all...]
usnic_uiom.h
80 struct scatterlist page_list[] __counted_by(nents);
/linux-master/drivers/infiniband/core/
umem.c
146 struct page **page_list; local
185 page_list = (struct page **) __get_free_page(GFP_KERNEL);
186 if (!page_list) {
217 gup_flags, page_list);
226 &umem->sgt_append, page_list, pinned, 0,
230 unpin_user_pages_dirty_lock(page_list, pinned, 0);
248 free_page((unsigned long) page_list);
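
usnic_uiom.c and umem.c above share the pin-in-batches idiom: page_list is a single scratch page reused as an array of struct page *, filled by pin_user_pages() at most one page's worth of pointers at a time. A sketch of the batching loop, with a stub standing in for pin_user_pages() (all names hypothetical):

#include <stdlib.h>

#define SCRATCH_SLOTS (4096 / sizeof(void *))  /* one page of pointers */

/* Stub standing in for pin_user_pages(): pretend every request succeeds. */
static long pin_batch(unsigned long start, long want, void **page_list)
{
	for (long i = 0; i < want; i++)
		page_list[i] = (void *)(start + (unsigned long)i * 4096);
	return want;
}

static int process_range(unsigned long addr, long npages)
{
	void **page_list = malloc(4096);        /* __get_free_page() in the kernel */

	if (!page_list)
		return -1;
	while (npages > 0) {
		long want = npages < (long)SCRATCH_SLOTS ? npages : (long)SCRATCH_SLOTS;
		long pinned = pin_batch(addr, want, page_list);

		if (pinned <= 0)
			break;                  /* real code would unpin what it holds */
		/* ... hand the pinned batch to an sg table here ... */
		addr += (unsigned long)pinned * 4096;
		npages -= pinned;
	}
	free(page_list);
	return npages ? -1 : 0;
}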
/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_validation.h
48 * @page_list: List of pages used by the memory allocator
53 * @mem_size_left: Free memory left in the last page in @page_list
54 * @page_address: Kernel virtual address of the last page in @page_list
65 struct list_head page_list; member in struct:vmw_validation_context
97 .page_list = LIST_HEAD_INIT((_name).page_list), \
/linux-master/drivers/comedi/
comedi_buf.c
28 if (bm->page_list) {
32 * Address is in page_list[0].
34 buf = &bm->page_list[0];
40 buf = &bm->page_list[i];
45 vfree(bm->page_list);
92 bm->page_list = vzalloc(sizeof(*buf) * n_pages);
93 if (!bm->page_list)
111 buf = &bm->page_list[i];
119 buf = &bm->page_list[i];
167 * Address is in page_list[
[all...]
/linux-master/arch/x86/kernel/
machine_kexec_32.c
163 unsigned long page_list[PAGES_NR]; local
201 page_list[PA_CONTROL_PAGE] = __pa(control_page);
202 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
203 page_list[PA_PGD] = __pa(image->arch.pgd);
206 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
229 (unsigned long)page_list,
machine_kexec_64.c
297 unsigned long page_list[PAGES_NR]; local
329 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
330 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
331 page_list[PA_TABLE_PAGE] =
335 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
358 (unsigned long)page_list,
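
Despite the name, the x86 kexec page_list is a small fixed array indexed by symbolic slots (PA_CONTROL_PAGE, VA_CONTROL_PAGE, ...): a parameter block handed to the relocation stub as a single pointer. A sketch of the enum-indexed idiom (the addresses are made up):

#include <stdio.h>

enum {
	PA_CONTROL_PAGE,
	VA_CONTROL_PAGE,
	PA_TABLE_PAGE,
	PA_SWAP_PAGE,
	PAGES_NR,                          /* array size, as in the matches above */
};

/* The consumer receives one pointer and indexes it by slot. */
static void consume(const unsigned long *page_list)
{
	printf("control page at %#lx\n", page_list[PA_CONTROL_PAGE]);
}

int main(void)
{
	unsigned long page_list[PAGES_NR] = { 0 };

	page_list[PA_CONTROL_PAGE] = 0x100000UL;   /* __pa(control_page), made up */
	page_list[VA_CONTROL_PAGE] = 0xc0100000UL; /* kernel VA, made up */
	consume(page_list);
	return 0;
}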
/linux-master/mm/
dmapool.c
16 * allocated pages. Each page in the page_list is split into blocks of at
49 struct list_head page_list; member in struct:dma_pool
64 struct list_head page_list; member in struct:dma_page
124 list_for_each_entry(page, &pool->page_list, page_list) {
262 INIT_LIST_HEAD(&retval->page_list);
330 list_add(&page->page_list, &pool->page_list);
382 list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
[all...]
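
dmapool uses the kernel's intrusive doubly linked list: struct dma_page embeds a list_head named page_list and the pool embeds another as the anchor, so the same identifier names both the link and the list head (lines 49 and 64 above). A self-contained sketch of the intrusive-list mechanics behind list_for_each_entry, using a local container_of rather than the kernel headers:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_page {
	struct list_head page_list;        /* the embedded link */
	int in_use;
};

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

int main(void)
{
	struct list_head pool = { &pool, &pool };  /* INIT_LIST_HEAD() equivalent */
	struct dma_page a = { .in_use = 1 }, b = { .in_use = 2 };

	list_add(&a.page_list, &pool);
	list_add(&b.page_list, &pool);

	/* Expanded form of list_for_each_entry(page, &pool, page_list). */
	for (struct list_head *p = pool.next; p != &pool; p = p->next) {
		struct dma_page *page = container_of(p, struct dma_page, page_list);

		printf("page in_use=%d\n", page->in_use);
	}
	return 0;
}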
/linux-master/arch/sh/kernel/
machine_kexec.c
71 unsigned long page_list; local
101 page_list = image->head;
118 (*rnk)(page_list, reboot_code_buffer,
/linux-master/arch/arm/kernel/
machine_kexec.c
166 unsigned long page_list, reboot_entry_phys; local
179 page_list = image->head & PAGE_MASK;
190 data->kexec_indirection_page = page_list;
/linux-master/drivers/net/ethernet/mellanox/mlx4/
mr.c
693 int start_index, int npages, u64 *page_list)
710 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
719 int start_index, int npages, u64 *page_list)
734 err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
739 page_list += chunk;
747 int start_index, int npages, u64 *page_list)
770 inbox[i + 2] = cpu_to_be64(page_list[i] |
780 page_list += chunk;
786 return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
793 u64 *page_list; local
692 mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
718 __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
746 mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
[all...]
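
__mlx4_write_mtt above splits an arbitrary npages run into bounded chunks, advancing start_index and the page_list pointer together, and each entry is stored big-endian with a present flag OR'd in (line 710). A sketch of the chunking loop under those assumptions; CHUNK_MAX is a made-up stand-in for the per-command limit and htobe64 for cpu_to_be64:

#include <endian.h>
#include <stdint.h>

#define CHUNK_MAX        64                /* hypothetical per-command entry limit */
#define MTT_FLAG_PRESENT 1ULL

/* Hypothetical stand-in for one mailbox write of 'npages' entries. */
static int write_chunk(uint64_t *dst, int start_index, int npages,
		       const uint64_t *page_list)
{
	for (int i = 0; i < npages; i++)
		dst[start_index + i] = htobe64(page_list[i] | MTT_FLAG_PRESENT);
	return 0;
}

static int write_mtt(uint64_t *dst, int start_index, int npages,
		     const uint64_t *page_list)
{
	while (npages > 0) {
		int chunk = npages < CHUNK_MAX ? npages : CHUNK_MAX;
		int err = write_chunk(dst, start_index, chunk, page_list);

		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;      /* slide the source window forward */
	}
	return 0;
}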
alloc.c
619 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
621 if (!buf->page_list)
625 buf->page_list[i].buf =
628 if (!buf->page_list[i].buf)
631 buf->page_list[i].map = t;
653 if (buf->page_list[i].buf)
656 buf->page_list[i].buf,
657 buf->page_list[i].map);
658 kfree(buf->page_list);
[all...]
/linux-master/drivers/infiniband/sw/rxe/
rxe_mr.c
96 XA_STATE(xas, &mr->page_list, 0);
137 xa_init(&mr->page_list);
161 XA_STATE(xas, &mr->page_list, 0);
165 xa_init(&mr->page_list);
223 err = xa_err(xa_store(&mr->page_list, mr->nbuf, page, GFP_KERNEL));
255 page = xa_load(&mr->page_list, index);
449 page = xa_load(&mr->page_list, index);
498 page = xa_load(&mr->page_list, index);
556 page = xa_load(&mr->page_list, index);
730 xa_destroy(&mr->page_list);
[all...]
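
rxe_mr.c keeps its pages in an XArray keyed by buffer index: xa_init() on create, xa_store() per page, xa_load() per access, xa_destroy() on teardown. The XArray is kernel-only, so the sketch below substitutes a growable pointer array to show the same index-to-page mapping (purely illustrative, not the rxe code):

#include <stdlib.h>

/* Userspace stand-in for the XArray: map_store ~ xa_store,
 * map_load ~ xa_load, map_destroy ~ xa_destroy. */
struct page_map {
	void **slots;
	size_t cap;
};

static int map_store(struct page_map *m, size_t index, void *page)
{
	if (index >= m->cap) {             /* grow to cover the new index */
		size_t ncap = m->cap ? m->cap * 2 : 64;

		while (ncap <= index)
			ncap *= 2;
		void **n = realloc(m->slots, ncap * sizeof(*n));
		if (!n)
			return -1;
		for (size_t i = m->cap; i < ncap; i++)
			n[i] = NULL;
		m->slots = n;
		m->cap = ncap;
	}
	m->slots[index] = page;
	return 0;
}

static void *map_load(struct page_map *m, size_t index)
{
	return index < m->cap ? m->slots[index] : NULL;
}

static void map_destroy(struct page_map *m)
{
	free(m->slots);
	m->slots = NULL;
	m->cap = 0;
}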
/linux-master/drivers/misc/genwqe/
card_utils.c
243 struct page **page_list, int num_pages,
254 daddr = dma_map_page(&pci_dev->dev, page_list[i],
537 * page_list and pci_alloc_consistent for the sg_list.
539 * be fixed with some effort. The page_list must be split into
559 /* determine space needed for page_list. */
568 m->page_list = kcalloc(m->nr_pages,
571 if (!m->page_list) {
572 dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
578 m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
584 m->page_list); /* ptr
242 genwqe_map_pages(struct genwqe_dev *cd, struct page **page_list, int num_pages, dma_addr_t *dma_list) argument
[all...]
/linux-master/drivers/md/
dm-kcopyd.c
61 struct page_list *pages;
96 static struct page_list zero_page_list;
218 static struct page_list *alloc_pl(gfp_t gfp)
220 struct page_list *pl;
235 static void free_pl(struct page_list *pl)
245 static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
247 struct page_list *next;
265 unsigned int nr, struct page_list **pages)
267 struct page_list *pl;
296 static void drop_pages(struct page_list *p
[all...]
/linux-master/include/uapi/linux/
vbox_vmmdev_types.h
238 } page_list; member in union:vmmdev_hgcm_function_parameter32::__anon2996
261 } page_list; member in union:vmmdev_hgcm_function_parameter64::__anon3000
/linux-master/drivers/gpu/drm/imagination/
pvr_free_list.c
226 u32 *page_list; local
230 page_list = pvr_gem_object_vmap(free_list->obj);
231 if (IS_ERR(page_list))
232 return PTR_ERR(page_list);
248 page_list[offset++] = (u32)dma_pfn;
/linux-master/drivers/net/ethernet/mediatek/
mtk_wed.c
639 struct mtk_wed_buf *page_list; local
652 page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
653 if (!page_list)
656 dev->tx_buf_ring.pages = page_list;
684 page_list[page_idx].p = page;
685 page_list[page_idx++].phy_addr = page_phys;
733 struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages; local
737 if (!page_list)
744 dma_addr_t page_phy = page_list[page_id
768 struct mtk_wed_buf *page_list; local
850 struct mtk_wed_buf *page_list = dev->hw_rro.pages; local
[all...]
/linux-master/drivers/vfio/pci/mlx5/
cmd.c
422 struct page **page_list; local
427 to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
428 page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
429 if (!page_list)
434 page_list);
441 &buf->table, page_list, filled, 0,
449 memset(page_list, 0, filled * sizeof(*page_list));
451 PAGE_SIZE / sizeof(*page_list));
[all...]

Completed in 404 milliseconds
