Searched refs:page_list (Results 1 - 20 of 20) sorted by relevance

/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mthca/
mthca_allocator.c
121 if (array->page_list[p].page)
122 return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
132 if (!array->page_list[p].page)
133 array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
135 if (!array->page_list[p].page)
138 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
139 ++array->page_list[p].used;
148 if (--array->page_list[p].used == 0) {
149 free_page((unsigned long) array->page_list[p].page);
150 array->page_list[
[all...]
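
Note: the mthca_allocator.c hits above show mthca_array's lazy two-level lookup: page_list[p] owns one zeroed page of void * slots, allocated on first set and freed when its per-page used count drops back to zero. A minimal userspace sketch of the same pattern (SLOTS_PER_PAGE, model_array, etc. are hypothetical stand-ins; the driver uses get_zeroed_page()/free_page() and MTHCA_ARRAY_MASK):

    #include <stdlib.h>

    #define SLOTS_PER_PAGE 512                 /* assumed: void * slots per page */
    #define SLOT_MASK      (SLOTS_PER_PAGE - 1)

    struct model_array_page { void **page; int used; };
    struct model_array      { struct model_array_page *page_list; };

    static void *model_array_get(struct model_array *a, int index)
    {
        int p = index / SLOTS_PER_PAGE;
        return a->page_list[p].page
             ? a->page_list[p].page[index & SLOT_MASK] : NULL;
    }

    static int model_array_set(struct model_array *a, int index, void *value)
    {
        int p = index / SLOTS_PER_PAGE;

        /* allocate the slot page on first use, zeroed like get_zeroed_page() */
        if (!a->page_list[p].page)
            a->page_list[p].page = calloc(SLOTS_PER_PAGE, sizeof(void *));
        if (!a->page_list[p].page)
            return -1;
        a->page_list[p].page[index & SLOT_MASK] = value;
        ++a->page_list[p].used;
        return 0;
    }

    static void model_array_clear(struct model_array *a, int index)
    {
        int p = index / SLOTS_PER_PAGE;

        /* drop the whole slot page once its last entry is cleared */
        if (--a->page_list[p].used == 0) {
            free(a->page_list[p].page);
            a->page_list[p].page = NULL;
        } else {
            a->page_list[p].page[index & SLOT_MASK] = NULL;
        }
    }
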
mthca_eq.c
230 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
484 if (!eq->page_list)
488 eq->page_list[i].buf = NULL;
500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
502 if (!eq->page_list[i].buf)
506 pci_unmap_addr_set(&eq->page_list[i], mapping, t);
508 clear_page(eq->page_list[i].buf);
578 if (eq->page_list[
[all...]
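
Note: mthca_eq.c allocates the event queue as npages independently DMA-mapped pages and resolves an entry by plain offset arithmetic (line 230). A hedged userspace sketch of the allocate-then-index pattern, with malloc() standing in for dma_alloc_coherent() and the unwind path simplified:

    #include <stdlib.h>
    #include <string.h>

    #define MODEL_PAGE_SIZE 4096   /* assumed page size */

    struct model_buf_list { void *buf; };
    struct model_eq       { struct model_buf_list *page_list; };

    /* same arithmetic as mthca_eq.c:230 */
    static void *model_get_eqe(struct model_eq *eq, unsigned off)
    {
        return (char *) eq->page_list[off / MODEL_PAGE_SIZE].buf
                      + off % MODEL_PAGE_SIZE;
    }

    static int model_eq_alloc(struct model_eq *eq, int npages)
    {
        eq->page_list = malloc(npages * sizeof *eq->page_list);
        if (!eq->page_list)
            return -1;

        for (int i = 0; i < npages; ++i)
            eq->page_list[i].buf = NULL;    /* lets cleanup free unconditionally */

        for (int i = 0; i < npages; ++i) {
            eq->page_list[i].buf = malloc(MODEL_PAGE_SIZE);
            if (!eq->page_list[i].buf)
                goto err;                   /* the driver unwinds the same way */
            memset(eq->page_list[i].buf, 0, MODEL_PAGE_SIZE);
        }
        return 0;

    err:
        for (int i = 0; i < npages; ++i)
            free(eq->page_list[i].buf);
        free(eq->page_list);
        return -1;
    }
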
mthca_provider.h
54 struct mthca_buf_list *page_list; member in union:mthca_buf
114 struct mthca_buf_list *page_list; member in struct:mthca_eq
mthca_mr.c
712 static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, argument
726 /* Trust the user not to pass misaligned data in page_list */
729 if (page_list[i] & ~page_mask)
740 int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, argument
749 err = mthca_check_fmr(fmr, page_list, list_len, iova);
762 __be64 mtt_entry = cpu_to_be64(page_list[i] |
781 int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, argument
789 err = mthca_check_fmr(fmr, page_list, list_len, iova);
807 fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
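
Note: both FMR map paths above first validate page_list (mthca_check_fmr rejects any entry with sub-page bits set, line 729) and then store each address big-endian with a hardware "present" flag OR-ed in. A hedged standalone restatement of that validate-and-encode step; htobe64() and MODEL_MTT_PRESENT are stand-ins for cpu_to_be64() and the device flag:

    #include <stdint.h>
    #include <endian.h>

    #define MODEL_MTT_PRESENT 1ULL    /* stand-in for the hardware present bit */

    /* Reject misaligned entries, as in mthca_mr.c:729. */
    static int model_check_page_list(const uint64_t *page_list, int list_len,
                                     int page_shift)
    {
        uint64_t page_mask = ~(((uint64_t) 1 << page_shift) - 1);

        for (int i = 0; i < list_len; ++i)
            if (page_list[i] & ~page_mask)  /* sub-page bits => misaligned */
                return -1;
        return 0;
    }

    /* Encode each page address as a big-endian MTT entry with the flag set. */
    static void model_write_mtts(uint64_t *mtts, const uint64_t *page_list,
                                 int list_len)
    {
        for (int i = 0; i < list_len; ++i)
            mtts[i] = htobe64(page_list[i] | MODEL_MTT_PRESENT);
    }
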
mthca_provider.c
933 u64 *page_list; local
971 page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
972 if (!page_list) {
982 page_list[n++] = buffer_list[i].addr + ((u64) j << shift);
993 page_list, shift, npages,
998 kfree(page_list);
1003 kfree(page_list);
1069 len = sg_dma_len(&chunk->page_list[j]) >> shift;
1071 pages[i++] = sg_dma_address(&chunk->page_list[
[all...]
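
Note: mthca_provider.c flattens a list of physical buffers into one page-granular page_list before handing it to the MTT writer: each buffer contributes its pages at addr + ((u64) j << shift) (line 982). A minimal hedged sketch of that flattening loop; struct model_phys_buf stands in for the ib_phys_buf entries:

    #include <stdint.h>
    #include <stdlib.h>

    struct model_phys_buf { uint64_t addr; uint64_t size; };

    /* npages must already count every (size >> shift) page of every buffer */
    static uint64_t *model_build_page_list(const struct model_phys_buf *buffer_list,
                                           int num_buf, int shift, int npages)
    {
        uint64_t *page_list = malloc(npages * sizeof *page_list);
        int n = 0;

        if (!page_list)
            return NULL;
        for (int i = 0; i < num_buf; ++i)
            for (uint64_t j = 0; j < buffer_list[i].size >> shift; ++j)
                page_list[n++] = buffer_list[i].addr + (j << shift);
        return page_list;    /* caller frees it once the MTT write is done */
    }
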
mthca_dev.h
191 } *page_list; member in struct:mthca_array
483 int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
486 int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
mthca_srq.c
77 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
mthca_cq.c
164 return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
mthca_qp.c
212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
222 return qp->queue.page_list[(qp->send_wqe_offset +
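
Note: mthca_srq.c, mthca_cq.c and mthca_qp.c all resolve an entry in a paged queue the same way: form the byte offset (entry index times entry size, i.e. n << wqe_shift), pick the page with offset >> PAGE_SHIFT, and add the remainder within the page. A hedged sketch of that shared address computation:

    #include <stdint.h>

    #define MODEL_PAGE_SHIFT 12
    #define MODEL_PAGE_SIZE  (1 << MODEL_PAGE_SHIFT)

    struct model_buf_list { void *buf; };
    struct model_queue    { struct model_buf_list *page_list; };

    /* entry n of a queue whose entries are (1 << wqe_shift) bytes each */
    static void *model_get_wqe(struct model_queue *q, int n, int wqe_shift)
    {
        uint32_t off = (uint32_t) n << wqe_shift;

        return (char *) q->page_list[off >> MODEL_PAGE_SHIFT].buf
                      + (off & (MODEL_PAGE_SIZE - 1));
    }
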
/barrelfish-master/lib/devif/backends/net/mlx4/include/rdma/
ib_fmr_pool.h
76 u64 page_list[0]; member in struct:ib_pool_fmr
87 u64 *page_list,
ib_umem.h
84 struct scatterlist page_list[0]; member in struct:ib_umem_chunk
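
Note: ib_fmr_pool.h and ib_umem.h both end their structs with a zero-length page_list[0] member, the old kernel idiom for a trailing variable-length array sized at allocation time. A hedged standalone illustration using the modern flexible-array spelling (struct model_pool_fmr is hypothetical):

    #include <stdint.h>
    #include <stdlib.h>

    struct model_pool_fmr {
        int      page_list_len;
        uint64_t page_list[];    /* written page_list[0] in older kernels */
    };

    static struct model_pool_fmr *model_pool_fmr_alloc(int max_pages)
    {
        /* one allocation covers the header plus max_pages trailing entries */
        struct model_pool_fmr *fmr =
            malloc(sizeof *fmr + max_pages * sizeof fmr->page_list[0]);

        if (fmr)
            fmr->page_list_len = 0;
        return fmr;
    }
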
ib_verbs.h
836 u64 *page_list; member in struct:ib_fast_reg_page_list
876 struct ib_fast_reg_page_list *page_list; member in struct:ib_send_wr::__anon893::__anon897
1268 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1278 int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
2073 * page_list array that is at least page_list_len in size. The actual
2075 * for initializing the contents of the page_list array before posting
2078 * The page_list array entries must be translated using one of the
2090 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
2092 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
2147 * @page_list
[all...]
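
Note: the ib_verbs.h excerpts spell out the fast-registration contract: the device allocates a page_list array of at least page_list_len entries, the consumer fills it before posting the work request, and the list is released with ib_free_fast_reg_page_list(). A hedged kernel-style fragment against that old (pre-4.x) verbs API, assuming ib_alloc_fast_reg_page_list(device, npages) returns an ERR_PTR-encoded pointer and that device, npages and dma_addrs exist in the surrounding code:

    struct ib_fast_reg_page_list *pl;
    int i;

    pl = ib_alloc_fast_reg_page_list(device, npages);
    if (IS_ERR(pl))
        return PTR_ERR(pl);

    /* consumer initializes the entries before posting the fast-reg WR */
    for (i = 0; i < npages; ++i)
        pl->page_list[i] = dma_addrs[i];

    /* ... build and post an IB_WR_FAST_REG_MR work request using pl ... */

    ib_free_fast_reg_page_list(pl);
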
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/net/mlx4/
mr.c
516 int start_index, int npages, u64 *page_list) {
529 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
538 int start_index, int npages, u64 *page_list) {
556 err = mlx4_write_mtt_chunk(priv, mtt, start_index, chunk, page_list);
561 page_list += chunk;
569 int npages, u64 *page_list) {
592 inbox[i + 2] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
601 page_list += chunk;
607 return __mlx4_write_mtt(priv, mtt, start_index, npages, page_list);
614 u64 *page_list; local
515 mlx4_write_mtt_chunk(struct mlx4_priv *priv, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
537 __mlx4_write_mtt(struct mlx4_priv *priv, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
568 mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
[all...]
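
Note: __mlx4_write_mtt() in mr.c shows the chunking pattern: a long page_list is pushed through a bounded-size write, advancing start_index and page_list by chunk until npages is exhausted (lines 556-601). A hedged standalone sketch; model_write_chunk() and MODEL_MAX_CHUNK stand in for mlx4_write_mtt_chunk() / the mailbox limit:

    #include <stdint.h>
    #include <endian.h>

    #define MODEL_MAX_CHUNK   256     /* assumed per-command entry limit */
    #define MODEL_MTT_PRESENT 1ULL    /* stand-in for MLX4_MTT_FLAG_PRESENT */

    /* stand-in for one bounded write (chunk entries at start_index) */
    static int model_write_chunk(uint64_t *mtts, int start_index, int chunk,
                                 const uint64_t *page_list)
    {
        for (int i = 0; i < chunk; ++i)
            mtts[start_index + i] = htobe64(page_list[i] | MODEL_MTT_PRESENT);
        return 0;
    }

    static int model_write_mtt(uint64_t *mtts, int start_index, int npages,
                               const uint64_t *page_list)
    {
        while (npages > 0) {
            int chunk = npages < MODEL_MAX_CHUNK ? npages : MODEL_MAX_CHUNK;
            int err = model_write_chunk(mtts, start_index, chunk, page_list);

            if (err)
                return err;
            npages      -= chunk;
            start_index += chunk;
            page_list   += chunk;    /* same advance as mr.c:561/601 */
        }
        return 0;
    }
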
eq.c
128 return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
922 eq->page_list = malloc(npages * sizeof *eq->page_list);
923 if (!eq->page_list)
927 eq->page_list[i].buf = NULL;
939 eq->page_list[i].buf = dma_alloc(BASE_PAGE_SIZE, &t);
940 if (!eq->page_list[i].buf)
944 eq->page_list[i].map = t;
946 memset(eq->page_list[i].buf, 0, BASE_PAGE_SIZE);
1002 if (eq->page_list[
[all...]
mlx4.h
394 struct mlx4_buf_list *page_list; member in struct:mlx4_eq
994 int start_index, int npages, uint64_t *page_list);
mlx4_devif_queue.c
1760 return eq->page_list[offset / BASE_PAGE_SIZE].buf
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/core/
fmr_pool.c
114 u64 *page_list,
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
130 !memcmp(page_list, fmr->page_list,
131 page_list_len * sizeof *page_list))
428 * @page_list:List of pages to map
429 * @list_len:Number of pages in @page_list
435 u64 *page_list,
449 page_list,
474 result = ib_map_phys_fmr(fmr->fmr, page_list, list_le
113 ib_fmr_cache_lookup(struct ib_fmr_pool *pool, u64 *page_list, int page_list_len, u64 io_virtual_address) argument
434 ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, u64 *page_list, int list_len, u64 io_virtual_address) argument
[all...]
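
Note: ib_fmr_cache_lookup() reuses an existing mapping by hashing the first page address into a bucket and then comparing the full page_list with memcmp (lines 125-131). A hedged standalone sketch with the bucket chain modeled as a plain singly linked list instead of the kernel hlist:

    #include <stdint.h>
    #include <string.h>

    #define MODEL_HASH_BUCKETS 1024    /* assumed cache size */

    struct model_fmr {
        struct model_fmr *next;
        int               page_list_len;
        uint64_t          io_virtual_address;
        uint64_t          page_list[];    /* trailing array, as in ib_pool_fmr */
    };

    static unsigned model_fmr_hash(uint64_t first_page)
    {
        /* stand-in for ib_fmr_hash(); any mix of the first address works */
        return (unsigned) (first_page >> 12) % MODEL_HASH_BUCKETS;
    }

    static struct model_fmr *model_cache_lookup(struct model_fmr **buckets,
                                                const uint64_t *page_list,
                                                int page_list_len,
                                                uint64_t io_virtual_address)
    {
        struct model_fmr *fmr;

        for (fmr = buckets[model_fmr_hash(*page_list)]; fmr; fmr = fmr->next)
            if (io_virtual_address == fmr->io_virtual_address &&
                page_list_len == fmr->page_list_len &&
                !memcmp(page_list, fmr->page_list,
                        page_list_len * sizeof *page_list))
                return fmr;
        return NULL;
    }
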
umem.c
60 ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
61 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
62 (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
121 ib_dma_unmap_sg_attrs(dev, chunk->page_list,
124 struct page *page = sg_page(&chunk->page_list[i]);
138 ib_dma_unmap_sg_attrs(dev, chunk->page_list,
141 struct page *page = sg_page(&chunk->page_list[i]);
174 struct page **page_list;
216 page_list = (struct page **) __get_free_page(GFP_KERNEL);
217 if (!page_list) {
[all...]
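
Note: the macro at umem.c:60-62 sizes an ib_umem_chunk so that the header plus its trailing page_list fill exactly one page: usable bytes are PAGE_SIZE minus offsetof the trailing array, and the element size is computed via null-pointer arithmetic (&p->page_list[1] - &p->page_list[0]) because the member is declared [0]. A hedged standalone restatement:

    #include <stddef.h>
    #include <stdio.h>

    #define MODEL_PAGE_SIZE 4096

    struct model_sg { void *page; unsigned length; };   /* stand-in scatterlist */

    struct model_chunk {
        struct model_chunk *next;
        int nents;
        struct model_sg page_list[];                    /* trailing array */
    };

    /* scatterlist entries that fit in one page alongside the header */
    #define MODEL_ENTS_PER_CHUNK \
        ((MODEL_PAGE_SIZE - offsetof(struct model_chunk, page_list)) / \
         sizeof(struct model_sg))

    int main(void)
    {
        printf("%zu entries per one-page chunk\n",
               (size_t) MODEL_ENTS_PER_CHUNK);
        return 0;
    }
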
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mlx4/
doorbell.c
77 db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
/barrelfish-master/lib/devif/backends/net/mlx4/include/linux/mlx4/
device.h
544 struct mlx4_buf_list *page_list; member in struct:mlx4_buf
1040 return (u8 *) buf->page_list[offset >> PAGE_SHIFT].buf
1077 int npages, u64 *page_list);
1337 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
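
Note: device.h:1040 is the mlx4 analogue of the queue lookups above: a byte offset into a paged mlx4_buf selects page_list[offset >> PAGE_SHIFT] and adds the low bits. A hedged sketch that also models the common contiguous case (in the real struct mlx4_buf this is a direct/paged distinction; the nbufs test here is an assumption of the model):

    #include <stdint.h>

    #define MODEL_PAGE_SHIFT 12
    #define MODEL_PAGE_SIZE  (1 << MODEL_PAGE_SHIFT)

    struct model_buf_list { void *buf; };

    struct model_buf {
        int nbufs;                            /* 1 => one contiguous buffer */
        struct model_buf_list direct;
        struct model_buf_list *page_list;
    };

    static void *model_buf_offset(struct model_buf *buf, unsigned offset)
    {
        if (buf->nbufs == 1)                  /* contiguous: plain pointer math */
            return (char *) buf->direct.buf + offset;

        /* paged: same arithmetic as device.h:1040 */
        return (char *) buf->page_list[offset >> MODEL_PAGE_SHIFT].buf
                      + (offset & (MODEL_PAGE_SIZE - 1));
    }
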

Completed in 216 milliseconds