/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mthca/
mthca_allocator.c
  121: if (array->page_list[p].page)
  122: return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
  132: if (!array->page_list[p].page)
  133: array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
  135: if (!array->page_list[p].page)
  138: array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
  139: ++array->page_list[p].used;
  148: if (--array->page_list[p].used == 0) {
  149: free_page((unsigned long) array->page_list[p].page);
  150: array->page_list[ [all...]
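The mthca_allocator.c lines above implement a two-level lookup table: page_list is an array of page descriptors, each holding one lazily allocated page of void * slots plus a use count. A minimal userspace analogue, with calloc()/free() standing in for get_zeroed_page()/free_page() and all demo_* names hypothetical:

#include <stdlib.h>

#define DEMO_PAGE_SIZE  4096
#define DEMO_ARRAY_MASK (DEMO_PAGE_SIZE / sizeof(void *) - 1)

struct demo_array {
    struct {
        void **page;   /* one lazily allocated page of slots */
        int    used;   /* live entries on this page */
    } *page_list;
};

static int demo_array_init(struct demo_array *a, int nent)
{
    size_t npage = (nent * sizeof(void *) + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;

    a->page_list = calloc(npage, sizeof *a->page_list);
    return a->page_list ? 0 : -1;
}

static void *demo_array_get(struct demo_array *a, int index)
{
    size_t p = index * sizeof(void *) / DEMO_PAGE_SIZE;

    return a->page_list[p].page
         ? a->page_list[p].page[index & DEMO_ARRAY_MASK] : NULL;
}

static int demo_array_set(struct demo_array *a, int index, void *value)
{
    size_t p = index * sizeof(void *) / DEMO_PAGE_SIZE;

    if (!a->page_list[p].page)   /* first use of this leaf: allocate it */
        a->page_list[p].page = calloc(1, DEMO_PAGE_SIZE);
    if (!a->page_list[p].page)
        return -1;

    a->page_list[p].page[index & DEMO_ARRAY_MASK] = value;
    ++a->page_list[p].used;
    return 0;
}

static void demo_array_clear(struct demo_array *a, int index)
{
    size_t p = index * sizeof(void *) / DEMO_PAGE_SIZE;

    if (--a->page_list[p].used == 0) {   /* last entry gone: free the leaf */
        free(a->page_list[p].page);
        a->page_list[p].page = NULL;
    } else {
        a->page_list[p].page[index & DEMO_ARRAY_MASK] = NULL;
    }
}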
mthca_eq.c
  230: return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
  482: eq->page_list = kmalloc(npages * sizeof *eq->page_list,
  484: if (!eq->page_list)
  488: eq->page_list[i].buf = NULL;
  500: eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
  502: if (!eq->page_list[i].buf)
  506: pci_unmap_addr_set(&eq->page_list[i], mapping, t);
  508: clear_page(eq->page_list[i].buf);
  578: if (eq->page_list[ [all...]
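The lookup at mthca_eq.c:230 turns an EQE index into a pointer by splitting a byte offset across the per-page DMA buffers allocated at lines 482-508. The arithmetic, isolated in a sketch (the 4096-byte page size and the demo_* names are assumptions):

#include <stdint.h>

#define DEMO_PAGE_SIZE 4096u

struct demo_buf_list { void *buf; };

static void *demo_get_eqe(struct demo_buf_list *page_list,
                          unsigned entry, unsigned eqe_size)
{
    unsigned off = entry * eqe_size;   /* byte offset into the ring */

    /* pick the page, then the byte within that page */
    return (uint8_t *) page_list[off / DEMO_PAGE_SIZE].buf
         + off % DEMO_PAGE_SIZE;
}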
mthca_provider.h
  54: struct mthca_buf_list *page_list;   (member of union mthca_buf)
  114: struct mthca_buf_list *page_list;   (member of struct mthca_eq)
|
mthca_mr.c
  712: static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,   (argument)
  726: /* Trust the user not to pass misaligned data in page_list */
  729: if (page_list[i] & ~page_mask)
  740: int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,   (argument)
  749: err = mthca_check_fmr(fmr, page_list, list_len, iova);
  762: __be64 mtt_entry = cpu_to_be64(page_list[i] |
  781: int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,   (argument)
  789: err = mthca_check_fmr(fmr, page_list, list_len, iova);
  807: fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
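mthca_check_fmr() validates the caller's page_list before either map_phys_fmr variant writes it into MTT entries; the comment at line 726 notes the driver largely trusts the caller. A sketch of the conventional page-alignment test this implies, where page_shift is a placeholder for the FMR attribute the driver actually uses:

#include <stdint.h>

static int demo_check_fmr(const uint64_t *page_list, int list_len,
                          unsigned page_shift)
{
    uint64_t page_mask = (1ULL << page_shift) - 1;
    int i;

    for (i = 0; i < list_len; ++i)
        if (page_list[i] & page_mask)   /* low bits set => not page aligned */
            return -1;

    return 0;
}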
|
mthca_provider.c
  933: u64 *page_list;   (local variable)
  971: page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
  972: if (!page_list) {
  982: page_list[n++] = buffer_list[i].addr + ((u64) j << shift);
  993: page_list, shift, npages,
  998: kfree(page_list);
  1003: kfree(page_list);
  1069: len = sg_dma_len(&chunk->page_list[j]) >> shift;
  1071: pages[i++] = sg_dma_address(&chunk->page_list[ [all...]
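Line 982 above shows how the physical-MR registration path flattens a list of physical buffers into the page_list handed to the MTT writer: each buffer contributes size >> shift page addresses. A userspace sketch, where demo_buf is a hypothetical stand-in for struct ib_phys_buf:

#include <stdint.h>
#include <stdlib.h>

struct demo_buf { uint64_t addr; uint64_t size; };

static uint64_t *demo_build_page_list(const struct demo_buf *buffer_list,
                                      int num_buf, int shift, int *npages)
{
    uint64_t *page_list;
    uint64_t j;
    int i, n = 0;

    *npages = 0;
    for (i = 0; i < num_buf; ++i)
        *npages += buffer_list[i].size >> shift;

    page_list = malloc(*npages * sizeof *page_list);
    if (!page_list)
        return NULL;

    for (i = 0; i < num_buf; ++i)
        for (j = 0; j < buffer_list[i].size >> shift; ++j)
            page_list[n++] = buffer_list[i].addr + (j << shift);  /* one entry per page */

    return page_list;
}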
mthca_dev.h
  191: } *page_list;   (member of struct mthca_array)
  483: int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
  486: int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
|
mthca_srq.c
  77: return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
|
mthca_cq.c
  164: return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
|
mthca_qp.c
  212: return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
  222: return qp->queue.page_list[(qp->send_wqe_offset +
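The SRQ, CQ, and QP lookups above (mthca_srq.c:77, mthca_cq.c:164, mthca_qp.c:212/222) all use the same trick: entry n sits at byte offset n << wqe_shift in a queue that is virtually contiguous but physically split across page_list pages. A sketch of that indexing, with 4096-byte pages assumed:

#include <stdint.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1u << DEMO_PAGE_SHIFT)

struct demo_buf_list { void *buf; };

static void *demo_get_wqe(struct demo_buf_list *page_list, int n, int wqe_shift)
{
    uint32_t off = (uint32_t) n << wqe_shift;   /* byte offset of entry n */

    return (uint8_t *) page_list[off >> DEMO_PAGE_SHIFT].buf
         + (off & (DEMO_PAGE_SIZE - 1));
}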
|
/barrelfish-master/lib/devif/backends/net/mlx4/include/rdma/
ib_fmr_pool.h
  76: u64 page_list[0];   (member of struct ib_pool_fmr)
  87: u64 *page_list,
|
ib_umem.h
  84: struct scatterlist page_list[0];   (member of struct ib_umem_chunk)
|
ib_verbs.h
  836: u64 *page_list;   (member of struct ib_fast_reg_page_list)
  876: struct ib_fast_reg_page_list *page_list;   (member of the anonymous fast_reg struct inside struct ib_send_wr)
  1268: void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
  1278: int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
  2073: * page_list array that is at least page_list_len in size. The actual
  2075: * for initializing the contents of the page_list array before posting
  2078: * The page_list array entries must be translated using one of the
  2090: * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
  2092: void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
  2147: * @page_list [all...]
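The declarations above belong to the fast-registration path: a driver-allocated ib_fast_reg_page_list is filled by the caller and posted in an IB_WR_FAST_REG_MR work request (see the comments at 2073-2078). A sketch of that flow against this tree's verbs, with error handling trimmed; note the page list must stay alive until the work request completes, after which the caller releases it with ib_free_fast_reg_page_list():

#include <linux/err.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int demo_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
                         u64 *dma_pages, int npages, u64 iova,
                         struct ib_fast_reg_page_list **frpl_out)
{
    struct ib_fast_reg_page_list *frpl;
    struct ib_send_wr wr, *bad_wr;
    int i;

    frpl = ib_alloc_fast_reg_page_list(qp->device, npages);
    if (IS_ERR(frpl))
        return PTR_ERR(frpl);

    /* the caller initializes page_list before posting, per the comment above */
    for (i = 0; i < npages; ++i)
        frpl->page_list[i] = dma_pages[i];

    memset(&wr, 0, sizeof wr);
    wr.opcode                    = IB_WR_FAST_REG_MR;
    wr.wr.fast_reg.iova_start    = iova;
    wr.wr.fast_reg.page_list     = frpl;
    wr.wr.fast_reg.page_list_len = npages;
    wr.wr.fast_reg.page_shift    = PAGE_SHIFT;
    wr.wr.fast_reg.length        = npages * PAGE_SIZE;
    wr.wr.fast_reg.rkey          = mr->rkey;

    *frpl_out = frpl;   /* freed by the caller after the WR completes */
    return ib_post_send(qp, &wr, &bad_wr);
}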
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/net/mlx4/ |
mr.c
  516: int start_index, int npages, u64 *page_list) {
  529: mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
  538: int start_index, int npages, u64 *page_list) {
  556: err = mlx4_write_mtt_chunk(priv, mtt, start_index, chunk, page_list);
  561: page_list += chunk;
  569: int npages, u64 *page_list) {
  592: inbox[i + 2] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
  601: page_list += chunk;
  607: return __mlx4_write_mtt(priv, mtt, start_index, npages, page_list);
  614: u64 *page_list;   (local variable)
  515: mlx4_write_mtt_chunk(struct mlx4_priv *priv, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list)   (argument)
  537: __mlx4_write_mtt(struct mlx4_priv *priv, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list)   (argument)
  568: mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list)   (argument)
  [all...]
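The MTT writers above share a chunking loop: at most a mailbox's worth of 64-bit entries is written per iteration, after which start_index and page_list advance by chunk. The loop shape, isolated; the chunk limit and demo_write_chunk() are placeholders, and the real entries are stored big-endian with MLX4_MTT_FLAG_PRESENT set, as at lines 529 and 592:

#include <stdint.h>

#define DEMO_MAX_CHUNK 510   /* e.g. mailbox entries minus a small header */

static int demo_write_chunk(int start_index, int count, const uint64_t *pages)
{
    /* stand-in for mlx4_write_mtt_chunk(): would copy `count` entries into
     * a command mailbox */
    (void) start_index; (void) count; (void) pages;
    return 0;
}

static int demo_write_mtt(int start_index, int npages, const uint64_t *page_list)
{
    while (npages > 0) {
        int chunk = npages < DEMO_MAX_CHUNK ? npages : DEMO_MAX_CHUNK;
        int err = demo_write_chunk(start_index, chunk, page_list);

        if (err)
            return err;

        npages      -= chunk;   /* advance to the next window */
        start_index += chunk;
        page_list   += chunk;
    }
    return 0;
}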
eq.c
  128: return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
  922: eq->page_list = malloc(npages * sizeof *eq->page_list);
  923: if (!eq->page_list)
  927: eq->page_list[i].buf = NULL;
  939: eq->page_list[i].buf = dma_alloc(BASE_PAGE_SIZE, &t);
  940: if (!eq->page_list[i].buf)
  944: eq->page_list[i].map = t;
  946: memset(eq->page_list[i].buf, 0, BASE_PAGE_SIZE);
  1002: if (eq->page_list[ [all...]
mlx4.h
  394: struct mlx4_buf_list *page_list;   (member of struct mlx4_eq)
  994: int start_index, int npages, uint64_t *page_list);
|
mlx4_devif_queue.c
  1760: return eq->page_list[offset / BASE_PAGE_SIZE].buf
|
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/core/ |
fmr_pool.c
  114: u64 *page_list,
  125: bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
  130: !memcmp(page_list, fmr->page_list,
  131: page_list_len * sizeof *page_list))
  428: * @page_list: List of pages to map
  429: * @list_len: Number of pages in @page_list
  435: u64 *page_list,
  449: page_list,
  474: result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
  113: ib_fmr_cache_lookup(struct ib_fmr_pool *pool, u64 *page_list, int page_list_len, u64 io_virtual_address)   (argument)
  434: ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, u64 *page_list, int list_len, u64 io_virtual_address)   (argument)
  [all...]
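ib_fmr_cache_lookup() above exists to avoid remapping: it hashes the first page address to pick a bucket, then requires the I/O virtual address, the list length, and the full page_list contents to match before reusing an already-mapped FMR. A userspace sketch, where demo_hash() and the bucket layout are simplified placeholders for ib_fmr_hash() and the pool's cache_bucket array:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define DEMO_HASH_BUCKETS 64

struct demo_pool_fmr {
    struct demo_pool_fmr *next;    /* bucket chain */
    uint64_t io_virtual_address;
    int page_list_len;
    uint64_t page_list[];          /* flexible array, like ib_pool_fmr */
};

static unsigned demo_hash(uint64_t first_page)
{
    return (unsigned) (first_page >> 12) % DEMO_HASH_BUCKETS;
}

static struct demo_pool_fmr *
demo_cache_lookup(struct demo_pool_fmr *buckets[DEMO_HASH_BUCKETS],
                  const uint64_t *page_list, int page_list_len,
                  uint64_t io_virtual_address)
{
    struct demo_pool_fmr *fmr;

    for (fmr = buckets[demo_hash(page_list[0])]; fmr; fmr = fmr->next)
        if (io_virtual_address == fmr->io_virtual_address &&
            page_list_len == fmr->page_list_len &&
            !memcmp(page_list, fmr->page_list,
                    page_list_len * sizeof *page_list))
            return fmr;   /* identical mapping already exists */

    return NULL;
}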
umem.c
  60: ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
  61: ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
  62: (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
  121: ib_dma_unmap_sg_attrs(dev, chunk->page_list,
  124: struct page *page = sg_page(&chunk->page_list[i]);
  138: ib_dma_unmap_sg_attrs(dev, chunk->page_list,
  141: struct page *page = sg_page(&chunk->page_list[i]);
  174: struct page **page_list;
  216: page_list = (struct page **) __get_free_page(GFP_KERNEL);
  217: if (!page_list) { [all...]
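The macro spanning umem.c:60-62 sizes a chunk so that the ib_umem_chunk header plus its trailing page_list[] scatterlist array fill exactly one page; the pointer arithmetic on a null pointer is just the size of one array element. The same computation written with offsetof/sizeof, using stand-in types and an assumed 4096-byte page:

#include <stddef.h>

#define DEMO_PAGE_SIZE 4096

struct demo_scatterlist { void *page; unsigned int length; };

struct demo_umem_chunk {
    int nents;
    struct demo_scatterlist page_list[];   /* flexible array member */
};

/* entries per chunk = bytes left in the page / bytes per entry */
enum {
    DEMO_MAX_PAGE_CHUNK =
        (DEMO_PAGE_SIZE - offsetof(struct demo_umem_chunk, page_list))
        / sizeof(struct demo_scatterlist)
};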
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mlx4/ |
doorbell.c
  77: db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
|
/barrelfish-master/lib/devif/backends/net/mlx4/include/linux/mlx4/ |
device.h
  544: struct mlx4_buf_list *page_list;   (member of struct mlx4_buf)
  1040: return (u8 *) buf->page_list[offset >> PAGE_SHIFT].buf
  1077: int npages, u64 *page_list);
  1337: int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
|