Searched refs:uctx (Results 1 - 25 of 47) sorted by relevance

/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_context.c
122 struct vmw_user_context *uctx)
130 spin_lock(&uctx->cotable_lock);
131 res = uctx->cotables[i];
132 uctx->cotables[i] = NULL;
133 spin_unlock(&uctx->cotable_lock);
142 struct vmw_user_context *uctx = local
154 vmw_cmdbuf_res_man_destroy(uctx->man);
156 vmw_binding_state_kill(uctx->cbs);
163 vmw_context_cotables_unref(dev_priv, uctx);
186 struct vmw_user_context *uctx local
121 vmw_context_cotables_unref(struct vmw_private *dev_priv, struct vmw_user_context *uctx) argument
375 struct vmw_user_context *uctx = local
553 struct vmw_user_context *uctx = local
584 struct vmw_user_context *uctx = local
808 struct vmw_user_context *uctx = local
862 struct vmw_user_context *uctx = local
895 struct vmw_user_context *uctx = local
[all...]
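
The vmwgfx_context.c matches show a teardown idiom: each cotable pointer is detached from the context while cotable_lock is held, and the saved pointer is presumably released only after the lock is dropped (the unreference call itself is not visible in the listing). Below is a minimal userspace sketch of that detach-under-lock, release-outside-lock pattern; the struct names and release_object() are invented for illustration, not the driver's API.

#include <pthread.h>
#include <stdlib.h>

#define NSLOTS 4

struct object { int id; };

struct ctx {
    pthread_spinlock_t lock;            /* models uctx->cotable_lock */
    struct object *slots[NSLOTS];       /* models uctx->cotables[]   */
};

static void release_object(struct object *obj)
{
    free(obj);                          /* stand-in for dropping the last reference */
}

static void ctx_slots_unref(struct ctx *c)
{
    for (int i = 0; i < NSLOTS; i++) {
        struct object *obj;

        /* Detach the pointer while holding the lock ... */
        pthread_spin_lock(&c->lock);
        obj = c->slots[i];
        c->slots[i] = NULL;
        pthread_spin_unlock(&c->lock);

        /* ... and release it outside the lock, so the release path can
         * sleep or take other locks without deadlocking against us. */
        if (obj)
            release_object(obj);
    }
}

int main(void)
{
    struct ctx c = { 0 };

    pthread_spin_init(&c.lock, PTHREAD_PROCESS_PRIVATE);
    for (int i = 0; i < NSLOTS; i++)
        c.slots[i] = calloc(1, sizeof(struct object));
    ctx_slots_unref(&c);
    pthread_spin_destroy(&c.lock);
    return 0;
}
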
/linux-master/drivers/scsi/qla2xxx/
qla_nvme.c
242 struct qla_nvme_unsol_ctx *uctx = sp->priv; local
246 if (!uctx) {
251 spin_lock_irqsave(&uctx->cmd_lock, flags);
252 uctx->sp = NULL;
254 spin_unlock_irqrestore(&uctx->cmd_lock, flags);
256 fd_rsp = uctx->fd_rsp;
258 list_del(&uctx->elem);
261 kfree(uctx);
267 struct qla_nvme_unsol_ctx *uctx = local
270 kref_put(&uctx
275 struct qla_nvme_unsol_ctx *uctx = sp->priv; local
364 struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp, local
1182 struct qla_nvme_unsol_ctx *uctx = item->purls_context; local
1231 struct qla_nvme_unsol_ctx *uctx; local
[all...]
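
The qla_nvme matches show teardown of an unsolicited-command context: the sp back-pointer is cleared under cmd_lock, the context is unlinked from its list and freed, and the nearby kref_put() suggests the lifetime is reference counted. A rough userspace model of a release-on-last-put teardown follows; the ref type and every name are invented for the sketch.

#include <pthread.h>
#include <stdlib.h>

/* Minimal kref-like counter; models kref_get()/kref_put(). */
struct ref {
    int count;
    pthread_mutex_t lock;
};

struct unsol_ctx {
    struct ref ref;                 /* must stay the first member (see cast below) */
    struct unsol_ctx *next;         /* models list membership (uctx->elem) */
    void *sp;                       /* back-pointer severed before teardown */
};

static void ref_init(struct ref *r)
{
    r->count = 1;
    pthread_mutex_init(&r->lock, NULL);
}

/* Drop a reference; call release() when the count hits zero. */
static void ref_put(struct ref *r, void (*release)(struct ref *))
{
    int zero;

    pthread_mutex_lock(&r->lock);
    zero = (--r->count == 0);
    pthread_mutex_unlock(&r->lock);
    if (zero)
        release(r);
}

static void unsol_ctx_release(struct ref *r)
{
    /* container_of-style recovery: ref is the first member of unsol_ctx. */
    struct unsol_ctx *ctx = (struct unsol_ctx *)r;

    ctx->sp = NULL;                 /* sever the back-pointer ...        */
    /* ... unlink from any list here ...                                 */
    free(ctx);                      /* ... then free the context itself  */
}

int main(void)
{
    struct unsol_ctx *ctx = calloc(1, sizeof(*ctx));

    ref_init(&ctx->ref);
    ref_put(&ctx->ref, unsol_ctx_release);   /* last put frees ctx */
    return 0;
}
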
qla_nvme.h
41 struct qla_nvme_unsol_ctx *uctx; member in struct:qla_nvme_rport
/linux-master/drivers/infiniband/hw/cxgb4/
resource.c
108 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) argument
114 mutex_lock(&uctx->lock);
115 if (!list_empty(&uctx->cqids)) {
116 entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
133 list_add_tail(&entry->entry, &uctx->cqids);
144 list_add_tail(&entry->entry, &uctx->qpids);
150 list_add_tail(&entry->entry, &uctx->qpids);
154 mutex_unlock(&uctx->lock);
164 struct c4iw_dev_ucontext *uctx)
173 mutex_lock(&uctx
163 c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx) argument
178 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) argument
237 c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx) argument
[all...]
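
The resource.c matches implement a per-ucontext ID allocator: c4iw_get_cqid()/c4iw_get_qpid() first reuse an ID from the context's free list under uctx->lock and otherwise, judging by the list_add_tail() calls, obtain fresh IDs and park the spares on the list; the put side returns an ID to the same list. Here is a compact userspace sketch of that free-list-with-refill scheme; the block size and all names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCK 4                     /* IDs obtained per refill (illustrative) */

struct id_node { unsigned id; struct id_node *next; };

struct id_pool {
    pthread_mutex_t lock;           /* models uctx->lock                */
    struct id_node *free_list;      /* models uctx->cqids / uctx->qpids */
    unsigned next_fresh;            /* models the global ID cursor      */
};

static unsigned id_get(struct id_pool *p)
{
    unsigned id;

    pthread_mutex_lock(&p->lock);
    if (p->free_list) {
        /* Fast path: reuse an ID previously released to this context. */
        struct id_node *n = p->free_list;
        p->free_list = n->next;
        id = n->id;
        free(n);
    } else {
        /* Slow path: take a fresh block and keep the spares on the list. */
        id = p->next_fresh;
        for (unsigned i = 1; i < BLOCK; i++) {
            struct id_node *n = malloc(sizeof(*n));
            n->id = p->next_fresh + i;
            n->next = p->free_list;
            p->free_list = n;
        }
        p->next_fresh += BLOCK;
    }
    pthread_mutex_unlock(&p->lock);
    return id;
}

static void id_put(struct id_pool *p, unsigned id)
{
    struct id_node *n = malloc(sizeof(*n));

    n->id = id;
    pthread_mutex_lock(&p->lock);
    n->next = p->free_list;
    p->free_list = n;
    pthread_mutex_unlock(&p->lock);
}

int main(void)
{
    struct id_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };

    unsigned a = id_get(&pool), b = id_get(&pool);
    id_put(&pool, a);
    printf("got %u and %u, then reused %u\n", a, b, id_get(&pool));
    return 0;
}
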
cq.c
38 struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
67 c4iw_put_cqid(rdev, cq->cqid, uctx);
71 struct c4iw_dev_ucontext *uctx,
77 int user = (uctx != &rdev->uctx);
83 ucontext = container_of(uctx, struct c4iw_ucontext, uctx);
85 cq->cqid = c4iw_get_cqid(rdev, uctx);
179 c4iw_put_cqid(rdev, cq->cqid, uctx);
991 ucontext ? &ucontext->uctx
37 destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, struct c4iw_dev_ucontext *uctx, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) argument
70 create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp) argument
[all...]
iw_cxgb4.h
175 struct c4iw_dev_ucontext uctx; member in struct:c4iw_rdev
523 struct c4iw_dev_ucontext uctx; member in struct:c4iw_ucontext
934 struct c4iw_dev_ucontext *uctx);
953 struct c4iw_dev_ucontext *uctx);
955 struct c4iw_dev_ucontext *uctx);
1011 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
1013 struct c4iw_dev_ucontext *uctx);
1014 u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
1016 struct c4iw_dev_ucontext *uctx);
qp.c
151 struct c4iw_dev_ucontext *uctx, int has_rq)
159 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
167 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
201 struct c4iw_dev_ucontext *uctx,
205 int user = (uctx != &rdev->uctx);
213 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
218 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
408 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
410 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
150 destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct c4iw_dev_ucontext *uctx, int has_rq) argument
199 create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp, int need_rq) argument
2473 free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp) argument
2510 alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp) argument
[all...]
/linux-master/tools/testing/selftests/powerpc/pmu/sampling_tests/
mmcr2_fcs_fch_test.c
21 ucontext_t *uctx = data; local
23 is_hv = !!(uctx->uc_mcontext.gp_regs[PT_MSR] & MSR_HV);
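
The selftest match is ordinary userspace: a signal handler receives the interrupted ucontext_t and reads saved register state (gp_regs[PT_MSR] there, to test the MSR_HV bit). Below is a portable sketch of the same mechanism using SA_SIGINFO; it only captures the pointer, since indexing uc_mcontext is architecture specific.

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void *volatile seen_uctx;        /* ucontext_t pointer captured by the handler */

static void handler(int sig, siginfo_t *info, void *data)
{
    (void)sig;
    (void)info;
    /* Arch-specific code (like the powerpc test above) would cast data to
     * ucontext_t * and read saved registers, e.g. uc_mcontext.gp_regs[PT_MSR]. */
    seen_uctx = data;
}

int main(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = handler;          /* SA_SIGINFO handlers get three arguments */
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGUSR1, &sa, NULL);

    raise(SIGUSR1);
    printf("handler saw ucontext_t at %p\n", seen_uctx);
    return 0;
}
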
/linux-master/security/selinux/
xfrm.c
75 struct xfrm_user_sec_ctx *uctx,
83 if (ctxp == NULL || uctx == NULL ||
84 uctx->ctx_doi != XFRM_SC_DOI_LSM ||
85 uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
88 str_len = uctx->ctx_len;
99 memcpy(ctx->ctx_str, &uctx[1], str_len);
280 * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
283 struct xfrm_user_sec_ctx *uctx,
286 return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
332 struct xfrm_user_sec_ctx *uctx)
74 selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *uctx, gfp_t gfp) argument
282 selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *uctx, gfp_t gfp) argument
331 selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx) argument
[all...]
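
selinux_xfrm_alloc_user() validates the user-supplied DOI and algorithm and then copies the context string that sits directly behind the fixed header, which is what &uctx[1] in the memcpy refers to; the af_key.c matches further down build the same header-plus-string layout. A hedged userspace sketch of that layout and the validate-then-copy step follows; the struct and constants are simplified stand-ins, not the real xfrm UAPI.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins for the xfrm_user_sec_ctx layout and constants. */
#define SC_DOI_LSM   1
#define SC_ALG_DUMMY 1

struct user_sec_ctx {
    unsigned short len;             /* total length: header + ctx string */
    unsigned char  ctx_doi;
    unsigned char  ctx_alg;
    unsigned short ctx_len;         /* length of the trailing string */
    /* the ctx string follows immediately after this header */
};

struct sec_ctx {
    unsigned short len;
    char ctx_str[];                 /* flexible array for the copied string */
};

/* Validate the header, then copy the payload that lives at &uctx[1]. */
static struct sec_ctx *alloc_from_user(const struct user_sec_ctx *uctx)
{
    struct sec_ctx *ctx;
    size_t str_len;

    if (!uctx || uctx->ctx_doi != SC_DOI_LSM || uctx->ctx_alg != SC_ALG_DUMMY)
        return NULL;

    str_len = uctx->ctx_len;
    ctx = malloc(sizeof(*ctx) + str_len + 1);
    if (!ctx)
        return NULL;

    ctx->len = str_len;
    memcpy(ctx->ctx_str, &uctx[1], str_len);    /* payload follows the header */
    ctx->ctx_str[str_len] = '\0';
    return ctx;
}

int main(void)
{
    const char label[] = "system_u:object_r:default_t:s0";
    struct user_sec_ctx *uctx = malloc(sizeof(*uctx) + sizeof(label));

    uctx->len = sizeof(*uctx) + sizeof(label) - 1;
    uctx->ctx_doi = SC_DOI_LSM;
    uctx->ctx_alg = SC_ALG_DUMMY;
    uctx->ctx_len = sizeof(label) - 1;
    memcpy(uctx + 1, label, sizeof(label) - 1);

    struct sec_ctx *ctx = alloc_from_user(uctx);
    if (ctx)
        printf("copied context: %s\n", ctx->ctx_str);
    free(ctx);
    free(uctx);
    return 0;
}
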
/linux-master/security/selinux/include/
xfrm.h
17 struct xfrm_user_sec_ctx *uctx, gfp_t gfp);
23 struct xfrm_user_sec_ctx *uctx);
/linux-master/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
193 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, argument
205 mutex_lock(&uctx->mm_list_lock);
206 list_add_tail(&mm->entry, &uctx->mm_head);
207 mutex_unlock(&uctx->mm_list_lock);
211 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, argument
216 mutex_lock(&uctx->mm_list_lock);
217 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
225 mutex_unlock(&uctx->mm_list_lock);
228 static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, argument
234 mutex_lock(&uctx
357 _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd, struct ocrdma_ucontext *uctx, struct ib_udata *udata) argument
388 is_ucontext_pd(struct ocrdma_ucontext *uctx, struct ocrdma_pd *pd) argument
403 ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev, struct ocrdma_ucontext *uctx, struct ib_udata *udata) argument
430 ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) argument
444 ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) argument
458 ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx) argument
465 ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) argument
523 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx); local
590 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context( local
636 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context( local
673 struct ocrdma_ucontext *uctx = NULL; local
929 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context( local
972 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context( local
[all...]
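
The ocrdma_verbs.c matches maintain a per-ucontext list of mmap-able physical ranges guarded by mm_list_lock: add registers an entry, del unlinks a matching one, and search answers whether a requested range was previously registered. Here is a small userspace model of that add/del/search bookkeeping; the types and field names are invented.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mm_entry {
    uint64_t phy_addr;
    size_t   len;
    struct mm_entry *next;
};

struct uctx_mm {
    pthread_mutex_t lock;           /* models mm_list_lock */
    struct mm_entry *head;          /* models mm_head      */
};

static int add_mmap(struct uctx_mm *u, uint64_t phy_addr, size_t len)
{
    struct mm_entry *mm = malloc(sizeof(*mm));

    if (!mm)
        return -1;
    mm->phy_addr = phy_addr;
    mm->len = len;
    pthread_mutex_lock(&u->lock);
    mm->next = u->head;
    u->head = mm;
    pthread_mutex_unlock(&u->lock);
    return 0;
}

static void del_mmap(struct uctx_mm *u, uint64_t phy_addr, size_t len)
{
    struct mm_entry **pp, *mm;

    pthread_mutex_lock(&u->lock);
    for (pp = &u->head; (mm = *pp) != NULL; pp = &mm->next) {
        if (mm->phy_addr == phy_addr && mm->len == len) {
            *pp = mm->next;         /* unlink and free the match */
            free(mm);
            break;
        }
    }
    pthread_mutex_unlock(&u->lock);
}

static bool search_mmap(struct uctx_mm *u, uint64_t phy_addr, size_t len)
{
    bool found = false;

    pthread_mutex_lock(&u->lock);
    for (struct mm_entry *mm = u->head; mm; mm = mm->next)
        if (mm->phy_addr == phy_addr && mm->len == len) {
            found = true;
            break;
        }
    pthread_mutex_unlock(&u->lock);
    return found;
}

int main(void)
{
    struct uctx_mm u = { .lock = PTHREAD_MUTEX_INITIALIZER };

    add_mmap(&u, 0x1000, 4096);
    printf("found: %d\n", search_mmap(&u, 0x1000, 4096));
    del_mmap(&u, 0x1000, 4096);
    printf("found after del: %d\n", search_mmap(&u, 0x1000, 4096));
    return 0;
}
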
ocrdma_verbs.h
64 int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
65 void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
ocrdma_ah.c
196 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
197 ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr);
/linux-master/drivers/infiniband/hw/usnic/
usnic_ib_verbs.h
65 int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
usnic_ib_verbs.c
633 int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) argument
635 struct ib_device *ibdev = uctx->device;
636 struct usnic_ib_ucontext *context = to_ucontext(uctx);
663 struct usnic_ib_ucontext *uctx = to_ucontext(context); local
682 list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
/linux-master/drivers/infiniband/hw/bnxt_re/
ib_verbs.h
159 struct bnxt_re_ucontext *uctx; member in struct:bnxt_re_user_mmap_entry
174 static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx) argument
176 return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ?
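
bnxt_re_init_depth() in ib_verbs.h sizes a queue: when the user context set the POW2_DISABLED capability bit the requested depth is used as-is, and otherwise it is presumably rounded up to the next power of two (the tail of the ternary is cut off in the listing, so the sketch assumes that). Below is a sketch of just that visible logic, with an illustrative flag name and a hand-rolled roundup helper.

#include <stdio.h>

#define CMASK_POW2_DISABLED  (1u << 0)  /* illustrative stand-in flag bit */

/* Round a 32-bit value up to the next power of two. */
static unsigned int roundup_pow2(unsigned int v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}

/* Models the visible part of the ternary: honour the requested depth when
 * power-of-two sizing is disabled for this context, otherwise round up. */
static unsigned int init_depth(unsigned int ent, unsigned int cmask)
{
    return (cmask & CMASK_POW2_DISABLED) ? ent : roundup_pow2(ent);
}

int main(void)
{
    printf("%u -> %u\n", 100u, init_depth(100, 0));                    /* 128 */
    printf("%u -> %u\n", 100u, init_depth(100, CMASK_POW2_DISABLED));  /* 100 */
    return 0;
}
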
ib_verbs.c
558 bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset, argument
570 entry->uctx = uctx;
574 ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
582 ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
785 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( local
790 spin_lock_irqsave(&uctx->sh_lock, flag);
791 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
794 spin_unlock_irqrestore(&uctx->sh_lock, flag);
1200 struct bnxt_re_ucontext *uctx)
1198 bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, struct ib_qp_init_attr *init_attr, struct bnxt_re_ucontext *uctx) argument
1257 bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, struct ib_qp_init_attr *init_attr, struct bnxt_re_ucontext *uctx) argument
1303 bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, struct ib_qp_init_attr *init_attr, struct bnxt_re_ucontext *uctx) argument
1355 struct bnxt_re_ucontext *uctx; local
1734 struct bnxt_re_ucontext *uctx; local
2123 struct bnxt_re_ucontext *uctx = local
2955 struct bnxt_re_ucontext *uctx = local
3092 struct bnxt_re_ucontext *uctx = NULL; local
4148 struct bnxt_re_ucontext *uctx = local
4231 struct bnxt_re_ucontext *uctx = container_of(ib_uctx, local
4267 struct bnxt_re_ucontext *uctx = container_of(ib_uctx, local
4333 struct bnxt_re_ucontext *uctx; local
4347 struct bnxt_re_ucontext *uctx; local
4426 struct bnxt_re_ucontext *uctx = entry->uctx; local
4489 struct bnxt_re_ucontext *uctx; local
[all...]
/linux-master/drivers/infiniband/hw/qedr/
verbs.h
45 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
46 void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
/linux-master/drivers/infiniband/hw/hns/
hns_roce_main.c
343 static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) argument
345 struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
350 uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
357 static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, argument
360 struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
361 struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
404 ret = hns_roce_alloc_uar_entry(uctx);
445 static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma) argument
447 struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
454 rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vm
[all...]
hns_roce_srq.c
409 struct hns_roce_ucontext *uctx; local
416 uctx = rdma_udata_to_drv_context(udata,
419 hns_roce_db_unmap_user(uctx, &srq->rdb);
430 struct hns_roce_ucontext *uctx; local
440 uctx = rdma_udata_to_drv_context(udata,
442 ret = hns_roce_db_map_user(uctx, ucmd.db_addr,
hns_roce_cq.c
229 struct hns_roce_ucontext *uctx; local
235 uctx = rdma_udata_to_drv_context(udata,
237 err = hns_roce_db_map_user(uctx, addr, &hr_cq->db);
262 struct hns_roce_ucontext *uctx; local
269 uctx = rdma_udata_to_drv_context(udata,
272 hns_roce_db_unmap_user(uctx, &hr_cq->db);
hns_roce_qp.c
814 struct hns_roce_ucontext *uctx = local
823 hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
845 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata, local
851 ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
862 ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
876 hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
953 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( local
958 hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
960 hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
1067 struct hns_roce_ucontext *uctx; local
[all...]
/linux-master/drivers/infiniband/hw/erdma/
erdma_verbs.c
39 static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp) argument
113 if (uctx->ext_db.enable) {
118 uctx->ext_db.sdb_off) |
120 uctx->ext_db.rdb_off);
186 static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq) argument
238 if (uctx->ext_db.enable) {
242 uctx->ext_db.cdb_off);
285 erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address, argument
300 ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry,
866 static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx, argument
912 free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx) argument
924 struct erdma_ucontext *uctx = rdma_udata_to_drv_context( local
1422 erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx) argument
[all...]
/linux-master/drivers/infiniband/sw/siw/
siw_verbs.c
56 struct siw_ucontext *uctx = to_siw_ctx(ctx); local
69 rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
71 siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
124 struct siw_ucontext *uctx = to_siw_ctx(base_ctx); local
126 atomic_dec(&uctx->sdev->num_ctx);
265 siw_mmap_entry_insert(struct siw_ucontext *uctx, argument
278 rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
308 struct siw_ucontext *uctx = local
443 siw_mmap_entry_insert(uctx, qp->sendq,
454 siw_mmap_entry_insert(uctx, q
592 struct siw_ucontext *uctx = local
[all...]
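
The siw, hns_roce, bnxt_re and erdma hits all revolve around the same rdma core facility: the driver registers a buffer under a unique mmap offset (rdma_user_mmap_entry_insert), hands the offset to userspace, and resolves it again when mmap() comes back in (rdma_user_mmap_entry_get / _get_pgoff). Here is a minimal userspace model of such an offset-keyed registry; the names below are illustrative, not the rdma core API itself.

#include <stdio.h>
#include <stdlib.h>

struct mmap_entry {
    unsigned long pgoff;            /* key handed back to userspace */
    void *address;                  /* what the offset maps to      */
    struct mmap_entry *next;
};

struct mmap_registry {
    struct mmap_entry *head;
    unsigned long next_pgoff;
};

/* Register a buffer and return the offset userspace should pass to mmap(). */
static unsigned long entry_insert(struct mmap_registry *reg, void *address)
{
    struct mmap_entry *e = malloc(sizeof(*e));

    e->pgoff = reg->next_pgoff++;
    e->address = address;
    e->next = reg->head;
    reg->head = e;
    return e->pgoff;
}

/* Resolve an incoming mmap offset back to its registered entry. */
static struct mmap_entry *entry_get_pgoff(struct mmap_registry *reg,
                                          unsigned long pgoff)
{
    for (struct mmap_entry *e = reg->head; e; e = e->next)
        if (e->pgoff == pgoff)
            return e;
    return NULL;                    /* mmap lookup failed */
}

int main(void)
{
    struct mmap_registry reg = { 0 };
    int buf;

    unsigned long off = entry_insert(&reg, &buf);
    struct mmap_entry *e = entry_get_pgoff(&reg, off);
    printf("offset %lu resolves to %p\n", off, e ? e->address : NULL);
    return 0;
}
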
/linux-master/net/key/
af_key.c
466 struct xfrm_user_sec_ctx *uctx = NULL; local
469 uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp);
471 if (!uctx)
474 uctx->len = pfkey_sec_ctx_len(sec_ctx);
475 uctx->exttype = sec_ctx->sadb_x_sec_exttype;
476 uctx->ctx_doi = sec_ctx->sadb_x_ctx_doi;
477 uctx->ctx_alg = sec_ctx->sadb_x_ctx_alg;
478 uctx->ctx_len = sec_ctx->sadb_x_ctx_len;
479 memcpy(uctx
1154 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); local
2299 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); local
2401 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); local
3325 struct xfrm_user_sec_ctx *uctx; local
[all...]

Completed in 470 milliseconds
