Searched refs:umem (Results 1 - 25 of 42) sorted by relevance

/freebsd-11-stable/sys/ofed/include/rdma/
ib_umem.h
61 /* Returns the offset of the umem start relative to the first page. */
62 static inline int ib_umem_offset(struct ib_umem *umem) argument
64 return umem->address & ((unsigned long)umem->page_size - 1);
67 /* Returns the first page of an ODP umem. */
68 static inline unsigned long ib_umem_start(struct ib_umem *umem) argument
70 return umem->address - ib_umem_offset(umem);
73 /* Returns the address of the page after the last one of an ODP umem. */
74 static inline unsigned long ib_umem_end(struct ib_umem *umem) argument
79 ib_umem_num_pages(struct ib_umem *umem) argument
102 ib_umem_release(struct ib_umem *umem) argument
103 ib_umem_page_count(struct ib_umem *umem) argument
104 ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, size_t length) argument
[all...]
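The three helpers above are pure address arithmetic over struct ib_umem fields. A minimal userspace model of the same computations (a sketch only; umem_model and the 4096-byte page size are illustrative stand-ins, not the kernel structure, and model_end() is one plausible reading of the "page after the last one" comment):

    #include <stdio.h>

    /* Hypothetical stand-in for struct ib_umem, keeping only the
     * fields the helpers above read. */
    struct umem_model {
        unsigned long address;   /* user virtual address of the region */
        unsigned long page_size; /* power of two, e.g. 4096 */
        unsigned long length;    /* region length in bytes */
    };

    /* ib_umem_offset(): offset of the start within its first page. */
    static unsigned long model_offset(const struct umem_model *u)
    {
        return u->address & (u->page_size - 1);
    }

    /* ib_umem_start(): first page-aligned address of the region. */
    static unsigned long model_start(const struct umem_model *u)
    {
        return u->address - model_offset(u);
    }

    /* ib_umem_end(): rounding address + length up to a page boundary
     * matches the "page after the last one" description above. */
    static unsigned long model_end(const struct umem_model *u)
    {
        unsigned long mask = u->page_size - 1;
        return (u->address + u->length + mask) & ~mask;
    }

    int main(void)
    {
        struct umem_model u = { 0x7f0000001234UL, 4096, 8192 };
        printf("offset=%lu start=0x%lx end=0x%lx\n",
               model_offset(&u), model_start(&u), model_end(&u));
        return 0;
    }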
ib_umem_odp.h
52 * An array of the pages included in the on-demand paging umem.
66 * umem, allowing only a single thread to map/unmap pages. The mutex
80 struct ib_umem *umem; member in struct:ib_umem_odp
91 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
93 void ib_umem_odp_release(struct ib_umem *umem);
108 int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
111 void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
155 ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem) argument
156 struct ib_umem *umem)
161 static inline void ib_umem_odp_release(struct ib_umem *umem) {} argument
/freebsd-11-stable/sys/ofed/drivers/infiniband/core/
ib_umem.c
53 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) argument
59 if (umem->nmap > 0)
60 ib_dma_unmap_sg(dev, umem->sg_head.sgl,
61 umem->nmap,
64 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
70 sg_free_table(&umem->sg_head);
90 struct ib_umem *umem; local
120 umem = kzalloc(sizeof *umem, GFP_KERNEL);
236 struct ib_umem *umem = container_of(work, struct ib_umem, work); local
249 ib_umem_release(struct ib_umem *umem) argument
302 ib_umem_page_count(struct ib_umem *umem) argument
332 ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, size_t length) argument
[all...]
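ib_umem_copy_from() at line 332 gathers bytes out of the pinned region. A flat-buffer model of its contract (the scatter/gather walk is elided and the bounds check is an assumption about reasonable behavior, not a quote of the kernel code):

    #include <stdio.h>
    #include <string.h>

    /* Model of ib_umem_copy_from(): copy `length` bytes that begin
     * `offset` bytes into the registered region. The real code walks
     * a scatter/gather table; here the region is one flat buffer. */
    static int model_copy_from(void *dst, const unsigned char *region,
                               size_t region_len, size_t offset, size_t length)
    {
        if (offset > region_len || length > region_len - offset)
            return -1; /* out of range */
        memcpy(dst, region + offset, length);
        return 0;
    }

    int main(void)
    {
        unsigned char region[16] = "abcdefghijklmno";
        char out[5] = { 0 };
        if (model_copy_from(out, region, sizeof(region), 4, 4) == 0)
            printf("copied: %s\n", out); /* prints "efgh" */
        return 0;
    }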
ib_umem_odp.c
51 /* Only update private counters for this umem if it has them.
52 * Otherwise skip it. All page faults will be delayed for this umem. */
69 /* Only update private counters for this umem if it has them.
70 * Otherwise skip it. All page faults will be delayed for this umem. */
134 /* Make sure that the fact the umem is dying is out before we release
245 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem) argument
264 umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
265 if (!umem->odp_data) {
269 umem
350 ib_umem_odp_release(struct ib_umem *umem) argument
435 ib_umem_odp_map_dma_single_page(struct ib_umem *umem, int page_index, u64 base_virt_addr, struct page *page, u64 access_mask, unsigned long current_seq) argument
521 ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, u64 access_mask, unsigned long current_seq) argument
624 ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt, u64 bound) argument
[all...]
ib_umem_rbtree.c
57 return ib_umem_start(umem_odp->umem);
63 * in the umem.
70 return ib_umem_end(umem_odp->umem) - 1;
86 struct ib_umem_odp *umem; local
93 umem = container_of(node, struct ib_umem_odp, interval_tree);
94 ret_val = cb(umem->umem, start, last, cookie) || ret_val;
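Lines 57 and 70 convert a umem's half-open byte range [ib_umem_start, ib_umem_end) into the closed interval [start, end - 1] that the interval tree stores. A small model of an overlap test on such closed intervals (overlaps() is illustrative, not the kernel helper):

    #include <stdio.h>

    /* Closed intervals [s, l] overlap iff each starts no later than
     * the other ends. */
    static int overlaps(unsigned long s1, unsigned long l1,
                        unsigned long s2, unsigned long l2)
    {
        return s1 <= l2 && s2 <= l1;
    }

    int main(void)
    {
        unsigned long start = 0x1000, end = 0x3000; /* half-open range */
        unsigned long last = end - 1;               /* closed upper bound */
        printf("%d\n", overlaps(start, last, 0x2fff, 0x3fff)); /* 1 */
        printf("%d\n", overlaps(start, last, 0x3000, 0x3fff)); /* 0 */
        return 0;
    }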
/freebsd-11-stable/cddl/lib/libumem/
Makefile
5 LIB= umem
6 SRCS= umem.c
/freebsd-11-stable/sys/dev/mlx4/mlx4_ib/
mlx4_ib_doorbell.c
39 struct ib_umem *umem; member in struct:mlx4_ib_user_db_page
64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
66 if (IS_ERR(page->umem)) {
67 err = PTR_ERR(page->umem);
75 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
91 ib_umem_release(db->u.user_page->umem);
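The doorbell code pins the whole page containing the user address (virt & PAGE_MASK, line 64) and later adds the in-page offset to the page's DMA address (line 75). The address split looks like this (a sketch with a hard-coded 4096-byte page; the doorbell address is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define MODEL_PAGE_SIZE 4096ULL
    #define MODEL_PAGE_MASK (~(MODEL_PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t virt = 0x7f00dead1010ULL;       /* hypothetical doorbell VA */
        uint64_t page = virt & MODEL_PAGE_MASK;  /* what ib_umem_get() pins */
        uint64_t off  = virt & ~MODEL_PAGE_MASK; /* added to sg_dma_address() */
        printf("pin 0x%llx, in-page offset 0x%llx\n",
               (unsigned long long)page, (unsigned long long)off);
        return 0;
    }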
mlx4_ib_mr.c
80 mr->umem = NULL;
93 mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, struct ib_umem *umem) argument
94 struct ib_umem *umem)
109 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
113 umem->page_size * k;
153 mr->umem = ib_umem_get(pd->uobject->context, start, length,
155 if (IS_ERR(mr->umem)) {
156 err = PTR_ERR(mr->umem);
160 n = ib_umem_page_count(mr->umem);
161 shift = ilog2(mr->umem->page_size);
[all...]
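mlx4_ib_umem_write_mtt() walks the scatter list (line 109) and emits one translation entry per page of each entry, stepping by page_size (line 113). The enumeration reduces to this (the segment values are invented for illustration):

    #include <stdio.h>

    struct seg { unsigned long dma; unsigned long len; };

    int main(void)
    {
        const unsigned long page_size = 4096;
        /* Two made-up DMA segments, 2 pages and 1 page long. */
        struct seg segs[] = { { 0x100000, 8192 }, { 0x200000, 4096 } };
        unsigned long i, k;

        for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++)
            for (k = 0; k < segs[i].len / page_size; k++)
                printf("mtt[] = 0x%lx\n", segs[i].dma + page_size * k);
        return 0;
    }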
mlx4_ib_srq.c
116 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
118 if (IS_ERR(srq->umem)) {
119 err = PTR_ERR(srq->umem);
123 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
124 ilog2(srq->umem->page_size), &srq->mtt);
128 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
219 ib_umem_release(srq->umem);
287 ib_umem_release(msrq->umem);
mlx4_ib_cq.c
138 mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context, struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, u64 buf_addr, int cqe) argument
139 struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
145 *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
147 if (IS_ERR(*umem))
148 return PTR_ERR(*umem);
150 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
151 ilog2((*umem)->page_size), &buf->mtt);
155 err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
165 ib_umem_release(*umem);
211 err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
270 ib_umem_release(cq->umem);
[all...]
/freebsd-11-stable/sys/dev/mlx5/mlx5_ib/
mlx5_ib_doorbell.c
36 struct ib_umem *umem; member in struct:mlx5_ib_user_db_page
61 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
63 if (IS_ERR(page->umem)) {
64 err = PTR_ERR(page->umem);
72 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
88 ib_umem_release(db->u.user_page->umem);
mlx5_ib_mem.c
33 /* @umem: umem object to scan
35 * @count: number of PAGE_SIZE pages covered by umem
40 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, argument
54 unsigned long page_shift = ilog2(umem->page_size);
57 if (umem->odp_data) {
58 *count = ib_umem_page_count(umem);
73 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
133 * Populate the given array with bus addresses from the umem
145 __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, int page_shift, size_t offset, size_t num_pages, __be64 *pas, int access_flags) argument
194 mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, int page_shift, __be64 *pas, int access_flags) argument
[all...]
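mlx5_ib_cont_pages() reports how many PAGE_SIZE pages the umem covers and the largest page shift the hardware can use for it. The core alignment idea, reduced to a single contiguous run (a toy sketch; the real function walks the whole sg list and special-cases ODP at line 57):

    #include <stdio.h>

    /* Largest power-of-two page size (as a shift) that both the base
     * address alignment and the length of a contiguous run permit,
     * never going below 4 KiB (shift 12). */
    static int best_shift(unsigned long base, unsigned long len, int max_shift)
    {
        int shift = max_shift;
        while (shift > 12 && ((base | len) & ((1UL << shift) - 1)))
            shift--;
        return shift;
    }

    int main(void)
    {
        printf("%d\n", best_shift(0x200000, 0x400000, 30)); /* 21: 2 MiB pages fit */
        printf("%d\n", best_shift(0x201000, 0x400000, 30)); /* 12: only 4 KiB */
        return 0;
    }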
mlx5_ib_mr.c
82 if (mr->umem->odp_data) {
85 * setting of umem->odp_data->private to point to our
91 mr->umem->odp_data->private = mr;
94 * umem->odp_data->private value in the invalidation
99 * before umem->odp_data->private == mr is visible to
517 mr->umem = NULL;
545 static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, argument
563 mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
633 struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length, local
635 if (IS_ERR(umem)) {
669 reg_umr(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr, u64 len, int npages, int page_shift, int order, int access_flags) argument
755 struct ib_umem *umem = mr->umem; local
874 reg_create(struct ib_mr *ibmr, struct ib_pd *pd, u64 virt_addr, u64 length, struct ib_umem *umem, int npages, int page_shift, int access_flags) argument
967 struct ib_umem *umem; local
1438 struct ib_umem *umem = mr->umem; local
[all...]
mlx5_ib_srq.c
108 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
110 if (IS_ERR(srq->umem)) {
111 mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
112 err = PTR_ERR(srq->umem);
116 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
131 mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
151 ib_umem_release(srq->umem);
223 ib_umem_release(srq->umem);
407 ib_umem_release(msrq->umem);
mlx5_ib.h
233 struct ib_umem *umem; member in struct:mlx5_ib_rwq
284 struct ib_umem *umem; member in struct:mlx5_ib_ubuffer
389 struct ib_umem *umem; member in struct:mlx5_ib_cq_buf
428 struct ib_umem *umem; member in struct:mlx5_shared_mr_info
472 struct ib_umem *umem; member in struct:mlx5_ib_srq
500 struct ib_umem *umem; member in struct:mlx5_ib_mr
887 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
889 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
892 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
898 int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u6
[all...]
/freebsd-11-stable/sys/ofed/drivers/infiniband/ulp/sdp/
sdp_zcopy.c
62 BUG_ON(!tx_sa->umem);
63 BUG_ON(!tx_sa->umem->chunk_list.next);
65 chunk = list_entry(tx_sa->umem->chunk_list.next, struct ib_umem_chunk, list);
68 off = tx_sa->umem->offset;
69 len = tx_sa->umem->length;
84 payload_len = MIN(tx_sa->umem->page_size - off, len);
106 tx_sa->bytes_sent = tx_sa->umem->length;
421 struct ib_umem *umem; local
445 umem = ib_umem_get(&sdp_sk(sk)->context, (unsigned long)uaddr, len,
448 if (IS_ERR(umem)) {
[all...]
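The zero-copy send path carves the umem into per-page payloads: the first chunk runs from the start offset to the end of the first page (the MIN() at line 84), and later chunks are whole pages until the length is exhausted. The chunking loop amounts to this (the off and len values are made up):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        const unsigned long page_size = 4096;
        unsigned long off = 300, len = 10000;

        while (len) {
            unsigned long payload = MIN(page_size - off, len);
            printf("send %lu bytes\n", payload);
            len -= payload;
            off = 0; /* chunks after the first start on a page boundary */
        }
        return 0;
    }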
/freebsd-11-stable/sys/netsmb/
smb_subr.h
103 void *smb_memdup(const void *umem, int len);
105 void *smb_memdupin(void *umem, size_t len);
smb_subr.c
129 smb_memdupin(void *umem, size_t len) argument
136 if (copyin(umem, p, len) == 0)
146 smb_memdup(const void *umem, int len) argument
155 bcopy(umem, p, len);
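smb_memdup() duplicates a kernel buffer with bcopy(); smb_memdupin() does the same for a user buffer via copyin(). A userspace model of the shared duplicate-or-fail shape (malloc/memcpy stand in for the kernel allocator and copy routines):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Allocate a fresh buffer and copy len bytes into it;
     * NULL on allocation failure. */
    static void *model_memdup(const void *src, size_t len)
    {
        void *p = malloc(len);
        if (p != NULL)
            memcpy(p, src, len);
        return p;
    }

    int main(void)
    {
        const char msg[] = "hello";
        char *copy = model_memdup(msg, sizeof(msg));
        if (copy != NULL) {
            printf("%s\n", copy);
            free(copy);
        }
        return 0;
    }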
/freebsd-11-stable/cddl/usr.bin/ztest/
Makefile
20 LIBADD= geom m nvpair umem zpool pthread avl zfs_core zfs uutil
/freebsd-11-stable/cddl/usr.bin/zinject/
Makefile
22 LIBADD= geom m nvpair umem uutil zfs_core zfs zpool
/freebsd-11-stable/cddl/usr.bin/zstreamdump/
Makefile
19 LIBADD= m nvpair umem zpool pthread z avl
/freebsd-11-stable/sbin/zfsbootcfg/
Makefile
10 LIBADD+=umem
/freebsd-11-stable/cddl/usr.sbin/zdb/
Makefile
26 LIBADD= nvpair umem uutil zfs zpool
/freebsd-11-stable/sys/dev/cxgbe/iw_cxgbe/
mem.c
480 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
481 if (IS_ERR(mhp->umem)) {
482 err = PTR_ERR(mhp->umem);
487 shift = ffs(mhp->umem->page_size) - 1;
489 n = mhp->umem->nmap;
501 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
505 mhp->umem->page_size * k);
546 ib_umem_release(mhp->umem);
719 if (mhp->umem)
[all...]
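Line 487 derives the page shift with ffs(): for a power-of-two page size, the 1-based index of the lowest set bit minus one is exactly log2 of the size. For example:

    #include <stdio.h>
    #include <strings.h>

    int main(void)
    {
        int page_size = 4096;
        /* ffs(4096) == 13, so shift == 12, i.e. log2(4096). */
        int shift = ffs(page_size) - 1;
        printf("page_size=%d shift=%d\n", page_size, shift);
        return 0;
    }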
/freebsd-11-stable/sys/dev/cxgb/ulp/iw_cxgb/
iw_cxgb_provider.c
393 if (mhp->umem)
394 ib_umem_release(mhp->umem);
568 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
569 if (IS_ERR(mhp->umem)) {
570 err = PTR_ERR(mhp->umem);
575 shift = ffs(mhp->umem->page_size) - 1;
577 n = mhp->umem->nmap;
591 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
595 mhp->umem->page_size * k);
[all...]

Completed in 231 milliseconds
