Searched refs:ctx_id (Results 1 - 25 of 86) sorted by relevance


/linux-master/include/linux/sunrpc/
gss_api.h
51 struct gss_ctx **ctx_id,
55 struct gss_ctx *ctx_id,
59 struct gss_ctx *ctx_id,
63 struct gss_ctx *ctx_id,
68 struct gss_ctx *ctx_id,
73 struct gss_ctx **ctx_id);
113 struct gss_ctx *ctx_id,
117 struct gss_ctx *ctx_id,
121 struct gss_ctx *ctx_id,
125 struct gss_ctx *ctx_id,
[all...]
/linux-master/drivers/media/platform/mediatek/vcodec/common/
mtk_vcodec_intr.c
19 int ctx_id, ctx_type, status = 0; local
28 ctx_id = ctx->id;
38 ctx_id = ctx->id;
54 ctx_id, command, ctx_type, timeout_ms,
59 ctx_id, command, ctx_type,
mtk_vcodec_dbgfs.h
54 void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dec_dev *vcodec_dev, int ctx_id);
62 static inline void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dec_dev *vcodec_dev, int ctx_id) argument
/linux-master/arch/x86/include/asm/
mmu.h
25 * ctx_id uniquely identifies this mm_struct. A ctx_id will never
26 * be reused, and zero is not a valid ctx_id.
28 u64 ctx_id; member in struct:__anon36
74 .ctx_id = 1, \
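
The mmu.h comment above states the invariant outright: a ctx_id uniquely identifies an mm_struct, is never reused, and zero is never valid (init_mm claims ctx_id 1). A minimal userspace sketch of that allocation discipline, assuming an atomic monotonic counter; the names last_ctx_id and mm_ctx are illustrative, not the kernel's:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Monotonic id source, seeded at 1 so that 0 remains the invalid id;
 * the very first context created (think init_mm) receives ctx_id 1. */
static atomic_uint_fast64_t last_ctx_id = 1;

struct mm_ctx {
	uint64_t ctx_id;	/* unique for the machine's uptime, never reused */
};

static void mm_ctx_init(struct mm_ctx *ctx)
{
	/* fetch_add returns the old value: ids are handed out as 1, 2, 3, ... */
	ctx->ctx_id = atomic_fetch_add(&last_ctx_id, 1);
}

int main(void)
{
	struct mm_ctx a, b;

	mm_ctx_init(&a);
	mm_ctx_init(&b);
	printf("a=%" PRIu64 " b=%" PRIu64 "\n",
	       (uint64_t)a.ctx_id, (uint64_t)b.ctx_id);
	return 0;
}
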
/linux-master/drivers/gpu/drm/virtio/
virtgpu_trace.h
21 __field(u32, ctx_id)
32 __entry->ctx_id = le32_to_cpu(hdr->ctx_id);
36 TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u num_free=%u seqno=%u",
39 __entry->ctx_id, __entry->num_free, __entry->seqno)
virtgpu_drv.h
84 uint32_t ctx_id; member in struct:virtio_gpu_object_params
273 uint32_t ctx_id; member in struct:virtio_gpu_fpriv
366 uint32_t ctx_id,
369 uint32_t ctx_id,
373 uint32_t ctx_id,
377 uint32_t ctx_id,
385 uint32_t ctx_id,
virtgpu_ioctl.c
46 virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
54 virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
266 (vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
326 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
478 params->ctx_id = vfpriv->ctx_id;
527 vfpriv->ctx_id, NULL, NULL);
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_sched.c
67 unsigned ctx_id,
84 ctx = amdgpu_ctx_get(fpriv, ctx_id);
130 args->in.ctx_id,
65 amdgpu_sched_context_priority_override(struct amdgpu_device *adev, int fd, unsigned ctx_id, int32_t priority) argument
/linux-master/net/sunrpc/auth_gss/
gss_mech_switch.c
355 struct gss_ctx **ctx_id,
359 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask)))
361 (*ctx_id)->mech_type = gss_mech_get(mech);
364 *ctx_id, endtime, gfp_mask);
408 gss_wrap(struct gss_ctx *ctx_id, argument
413 return ctx_id->mech_type->gm_ops
414 ->gss_wrap(ctx_id, offset, buf, inpages);
418 gss_unwrap(struct gss_ctx *ctx_id, argument
423 return ctx_id
353 gss_import_sec_context(const void *input_token, size_t bufsize, struct gss_api_mech *mech, struct gss_ctx **ctx_id, time64_t *endtime, gfp_t gfp_mask) argument
[all...]
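
The gss_mech_switch.c hits show both halves of the pattern: gss_import_sec_context() zero-allocates a gss_ctx and binds it to its mechanism, and gss_wrap()/gss_unwrap() then dispatch through that mechanism's gm_ops table. A compilable sketch of the same shape; the slimmed-down structs and demo_wrap are stand-ins, not the real SunRPC types:

#include <stdio.h>
#include <stdlib.h>

struct gss_ctx;

/* Per-mechanism operations table, modeled on gss_api_ops. */
struct gss_api_ops {
	int (*gss_wrap)(struct gss_ctx *ctx, int offset);
};

struct gss_api_mech {
	const char *gm_name;
	const struct gss_api_ops *gm_ops;
};

struct gss_ctx {
	const struct gss_api_mech *mech_type;
};

/* Zero-allocate a context and bind the mechanism, as the import path does. */
static int gss_ctx_alloc(const struct gss_api_mech *mech, struct gss_ctx **ctx_id)
{
	if (!(*ctx_id = calloc(1, sizeof(**ctx_id))))
		return -1;
	(*ctx_id)->mech_type = mech;
	return 0;
}

/* Thin dispatcher: forward through the bound mechanism's ops table. */
static int gss_wrap(struct gss_ctx *ctx_id, int offset)
{
	return ctx_id->mech_type->gm_ops->gss_wrap(ctx_id, offset);
}

static int demo_wrap(struct gss_ctx *ctx, int offset)
{
	printf("wrap via %s at offset %d\n", ctx->mech_type->gm_name, offset);
	return 0;
}

int main(void)
{
	static const struct gss_api_ops ops = { .gss_wrap = demo_wrap };
	static const struct gss_api_mech mech = { "demo", &ops };
	struct gss_ctx *ctx;

	if (gss_ctx_alloc(&mech, &ctx))
		return 1;
	gss_wrap(ctx, 16);
	free(ctx);
	return 0;
}
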
/linux-master/drivers/accel/ivpu/
ivpu_jsm_msg.h
13 int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
ivpu_mmu_context.h
37 int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
ivpu_drv.c
219 u32 ctx_id; local
238 ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
245 ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
255 ctx_id, current->comm, task_pid_nr(current));
260 xa_erase_irq(&vdev->context_xa, ctx_id);
603 unsigned long ctx_id; local
607 xa_for_each(&vdev->context_xa, ctx_id, file_priv)
/linux-master/drivers/media/platform/st/sti/hva/
hva-hw.c
120 u8 ctx_id = 0; local
130 ctx_id = (hva->sts_reg & 0xFF00) >> 8;
131 if (ctx_id >= HVA_MAX_INSTANCES) {
133 HVA_PREFIX, __func__, ctx_id);
137 ctx = hva->instances[ctx_id];
223 u8 ctx_id = 0; local
233 ctx_id = (hva->sts_reg & 0xFF00) >> 8;
234 if (ctx_id >= HVA_MAX_INSTANCES) {
236 ctx_id);
240 ctx = hva->instances[ctx_id];
[all...]
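
Both hva-hw.c interrupt handlers above recover the context id from bits 15:8 of the status register and bounds-check it before indexing the instances table. A sketch of that decode, assuming an illustrative HVA_MAX_INSTANCES of 16:

#include <stdint.h>
#include <stdio.h>

#define HVA_MAX_INSTANCES 16	/* illustrative bound, not the driver's value */

struct hva_ctx {
	int id;
};

/* Pull the instance id out of bits 15:8 of the status word and refuse
 * out-of-range values before they become an array index. */
static struct hva_ctx *hva_ctx_from_status(uint32_t sts_reg,
					   struct hva_ctx *instances[])
{
	uint8_t ctx_id = (sts_reg & 0xFF00) >> 8;

	if (ctx_id >= HVA_MAX_INSTANCES) {
		fprintf(stderr, "invalid context id %u\n", ctx_id);
		return NULL;
	}
	return instances[ctx_id];
}

int main(void)
{
	struct hva_ctx ctx3 = { .id = 3 };
	struct hva_ctx *instances[HVA_MAX_INSTANCES] = { [3] = &ctx3 };
	struct hva_ctx *ctx = hva_ctx_from_status(0x0300, instances);

	printf("resolved ctx id = %d\n", ctx ? ctx->id : -1);
	return 0;
}
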
/linux-master/sound/soc/fsl/
fsl_easrc.c
240 unsigned int ctx_id, int mem_type)
253 if (ctx_id >= EASRC_CTX_MAX_NUM) {
254 dev_err(dev, "Invalid context id[%d]\n", ctx_id);
258 reg = REG_EASRC_CCE1(ctx_id);
428 static int fsl_easrc_write_pf_coeff_mem(struct fsl_asrc *easrc, int ctx_id, argument
450 ret = fsl_easrc_coeff_mem_ptr_reset(easrc, ctx_id, EASRC_PF_COEFF_MEM);
460 regmap_write(easrc->regmap, REG_EASRC_PCF(ctx_id),
462 regmap_write(easrc->regmap, REG_EASRC_PCF(ctx_id),
470 unsigned int ctx_id)
488 if (ctx_id >
239 fsl_easrc_coeff_mem_ptr_reset(struct fsl_asrc *easrc, unsigned int ctx_id, int mem_type) argument
469 fsl_easrc_prefilter_config(struct fsl_asrc *easrc, unsigned int ctx_id) argument
861 fsl_easrc_config_slot(struct fsl_asrc *easrc, unsigned int ctx_id) argument
924 fsl_easrc_release_slot(struct fsl_asrc *easrc, unsigned int ctx_id) argument
964 fsl_easrc_config_context(struct fsl_asrc *easrc, unsigned int ctx_id) argument
[all...]
/linux-master/arch/x86/mm/
tlb.c
187 * necessary invalidation by clearing out the 'ctx_id' which
211 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
234 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
235 next->context.ctx_id)
400 * same process. Using the mm pointer instead of mm->context.ctx_id
561 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
562 next->context.ctx_id);
638 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
725 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, m
[all...]
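
The tlb.c hits lean on the mmu.h invariant: each per-CPU ASID slot records the ctx_id of the mm it last held, writing 0 invalidates the slot (no live mm can ever match it), and a slot may be reused without a flush only when its recorded ctx_id equals the incoming mm's. A small sketch of that bookkeeping, with an illustrative NR_ASIDS:

#include <stdint.h>
#include <stdio.h>

#define NR_ASIDS 6	/* illustrative; x86 sizes this as TLB_NR_DYN_ASIDS */

/* Per-CPU view: the ctx_id of the mm each ASID slot last ran. */
static uint64_t asid_ctx_id[NR_ASIDS];

/* Invalidate a slot: ctx_id 0 is never valid, so nothing can match it. */
static void invalidate_asid(unsigned int asid)
{
	asid_ctx_id[asid] = 0;
}

/* An ASID may be reused without flushing only if it still belongs
 * to the incoming mm's ctx_id. */
static int find_asid(uint64_t next_ctx_id)
{
	for (unsigned int asid = 0; asid < NR_ASIDS; asid++)
		if (asid_ctx_id[asid] == next_ctx_id)
			return asid;
	return -1;	/* caller must claim a fresh slot and flush */
}

int main(void)
{
	asid_ctx_id[2] = 42;
	printf("hit=%d\n", find_asid(42));	/* 2: TLB contents still valid */
	invalidate_asid(2);
	printf("hit=%d\n", find_asid(42));	/* -1: flush forced on switch */
	return 0;
}
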
/linux-master/drivers/gpu/drm/imx/dcss/
dcss-ctxld.c
332 void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val, argument
341 int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];
343 if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
348 ctx[ctx_id][item_idx].val = val;
349 ctx[ctx_id][item_idx].ofs = reg_ofs;
350 ctxld->ctx_size[curr_ctx][ctx_id] += 1;
353 void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id, argument
357 dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
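
dcss_ctxld_write_irqsafe() above appends a (value, register offset) pair to the context buffer selected by ctx_id, bailing out rather than overflowing the per-context capacity. A sketch of that append with a single context and an illustrative capacity:

#include <stdint.h>
#include <stdio.h>

#define CTX_CAPACITY 8	/* illustrative; dcss sizes each context individually */

struct ctx_item {
	uint32_t val;
	uint32_t ofs;
};

struct ctxld {
	struct ctx_item ctx[CTX_CAPACITY];
	int ctx_size;
};

/* Queue one register write into the context buffer, refusing overflow. */
static int ctxld_write(struct ctxld *c, uint32_t val, uint32_t reg_ofs)
{
	int item_idx = c->ctx_size;

	if (item_idx + 1 > CTX_CAPACITY)
		return -1;	/* context full, write dropped */

	c->ctx[item_idx].val = val;
	c->ctx[item_idx].ofs = reg_ofs;
	c->ctx_size += 1;
	return 0;
}

int main(void)
{
	struct ctxld c = { 0 };

	ctxld_write(&c, 0xdeadbeef, 0x10);
	printf("queued %d item(s), first ofs=0x%x\n", c.ctx_size, c.ctx[0].ofs);
	return 0;
}
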
dcss-ss.c
68 u32 ctx_id; member in struct:dcss_ss
78 dcss_ctxld_write(ss->ctxld, ss->ctx_id, val,
101 ss->ctx_id = CTX_SB_HP;
/linux-master/Documentation/gpu/rfc/
i915_vm_bind.h
226 * @ctx_id: Context id
230 __u32 ctx_id; member in struct:drm_i915_gem_execbuffer3
235 * An index in the user engine map of the context specified by @ctx_id.
/linux-master/drivers/accel/habanalabs/common/
command_buffer.c
104 int ctx_id, bool internal_cb)
118 if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
137 } else if (ctx_id == HL_KERNEL_ASID_ID) {
186 int rc, ctx_id = cb_args->ctx->asid; local
194 if (ctx_id == HL_KERNEL_ASID_ID &&
212 cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
226 if (ctx_id == HL_KERNEL_ASID_ID) {
278 int ctx_id = ctx->asid; local
280 if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
294 ctx_id
103 hl_cb_alloc(struct hl_device *hdev, u32 cb_size, int ctx_id, bool internal_cb) argument
[all...]
/linux-master/drivers/net/ethernet/netronome/nfp/flower/
offload.c
1497 u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id); local
1499 parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
1633 u32 ctx_id; local
1635 ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
1636 pkts = priv->stats[ctx_id].pkts;
1640 bytes = priv->stats[ctx_id].bytes;
1641 used = priv->stats[ctx_id].used;
1644 priv->stats[ctx_id].pkts = 0;
1645 priv->stats[ctx_id].bytes = 0;
1653 ctx_id
1691 u32 ctx_id; local
[all...]
metadata.c
121 u32 ctx_id; local
129 ctx_id = be32_to_cpu(stats->stats_con_id);
130 priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
131 priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
132 priv->stats[ctx_id].used = jiffies;
435 nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id) argument
440 ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
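
metadata.c treats the firmware's stats_con_id as a direct index into a per-context stats array, accumulating packet and byte counts and refreshing the used timestamp on every update. A sketch of that accumulation (byte-order conversion and the rhashtable reverse lookup are omitted; NUM_CTX is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NUM_CTX 128	/* illustrative; sized by the host context id space */

/* Per-context stats bucket, like the flower driver's priv->stats[]. */
struct flow_stats {
	uint64_t pkts;
	uint64_t bytes;
	time_t used;
};

static struct flow_stats stats[NUM_CTX];

/* Fold one firmware stats message into the bucket its ctx_id names. */
static void update_stats(uint32_t ctx_id, uint32_t pkt_count, uint64_t byte_count)
{
	if (ctx_id >= NUM_CTX)
		return;
	stats[ctx_id].pkts += pkt_count;
	stats[ctx_id].bytes += byte_count;
	stats[ctx_id].used = time(NULL);
}

int main(void)
{
	update_stats(7, 10, 15000);
	update_stats(7, 5, 7500);
	printf("ctx 7: %llu pkts, %llu bytes\n",
	       (unsigned long long)stats[7].pkts,
	       (unsigned long long)stats[7].bytes);
	return 0;
}
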
/linux-master/drivers/infiniband/hw/efa/
efa_com.c
246 u16 ctx_id; local
249 ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
253 return ctx_id;
257 u16 ctx_id)
261 aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id;
270 u16 ctx_id = cmd_id & (aq->depth - 1); local
274 efa_com_dealloc_ctx_id(aq, ctx_id);
280 u16 ctx_id = cmd_id & (aq->depth - 1); local
282 if (aq->comp_ctx[ctx_id].occupied && capture) {
291 aq->comp_ctx[ctx_id]
256 efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq, u16 ctx_id) argument
309 u16 ctx_id; local
[all...]
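
efa_com.c recycles completion contexts through a small pool and, on completion, recovers the slot as cmd_id & (depth - 1); the mask trick only works because the admin queue depth is a power of two. A sketch of that mapping with an illustrative depth of 64:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define AQ_DEPTH 64	/* illustrative; must be a power of two */

/* Recover the completion-context slot from a command id: the low
 * log2(depth) bits of cmd_id index the context array. */
static uint16_t ctx_id_from_cmd_id(uint16_t cmd_id)
{
	return cmd_id & (AQ_DEPTH - 1);
}

int main(void)
{
	assert((AQ_DEPTH & (AQ_DEPTH - 1)) == 0);	/* power-of-two check */

	/* cmd_id grows across submissions; ctx_id wraps within [0, 63]. */
	for (uint16_t cmd_id = 62; cmd_id < 67; cmd_id++)
		printf("cmd_id=%u -> ctx_id=%u\n", (unsigned int)cmd_id,
		       (unsigned int)ctx_id_from_cmd_id(cmd_id));
	return 0;
}
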
/linux-master/drivers/gpu/drm/xe/
xe_execlist.c
45 u32 ctx_id)
54 xe_gt_assert(hwe->gt, FIELD_FIT(XEHP_SW_CTX_ID, ctx_id));
55 lrc_desc |= FIELD_PREP(XEHP_SW_CTX_ID, ctx_id);
57 xe_gt_assert(hwe->gt, FIELD_FIT(SW_CTX_ID, ctx_id));
58 lrc_desc |= FIELD_PREP(SW_CTX_ID, ctx_id);
44 __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, u32 ctx_id) argument
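
__start_lrc() above packs the software context id into the LRC descriptor with FIELD_PREP(), after asserting via FIELD_FIT() that the id fits the platform's field width. A userspace sketch with stand-in helpers and an illustrative field layout; the real SW_CTX_ID/XEHP_SW_CTX_ID masks live in the xe driver's register definitions:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative: an 11-bit context id field at bits 47:37 of the descriptor. */
#define SW_CTX_ID_MASK	(((1ULL << 11) - 1) << 37)

/* Portable stand-ins for the kernel's FIELD_FIT()/FIELD_PREP() helpers. */
static unsigned int field_shift(uint64_t mask)
{
	unsigned int s = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		s++;
	}
	return s;
}

static int field_fit(uint64_t mask, uint64_t val)
{
	return ((val << field_shift(mask)) & ~mask) == 0;
}

static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << field_shift(mask)) & mask;
}

int main(void)
{
	uint64_t lrc_desc = 0;
	uint32_t ctx_id = 0x2a;

	assert(field_fit(SW_CTX_ID_MASK, ctx_id));	/* xe_gt_assert() stand-in */
	lrc_desc |= field_prep(SW_CTX_ID_MASK, ctx_id);
	printf("lrc_desc=0x%016" PRIx64 "\n", lrc_desc);
	return 0;
}
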
/linux-master/include/uapi/drm/
amdgpu_drm.h
289 __u32 ctx_id; member in struct:drm_amdgpu_ctx_in
296 __u32 ctx_id; member in struct:drm_amdgpu_ctx_out::__anon1267
350 __u32 ctx_id; member in struct:drm_amdgpu_sched_in
482 __u32 ctx_id; member in struct:drm_amdgpu_wait_cs_in
496 __u32 ctx_id; member in struct:drm_amdgpu_fence
621 __u32 ctx_id; member in struct:drm_amdgpu_cs_in
687 __u32 ctx_id; member in struct:drm_amdgpu_cs_chunk_dep
/linux-master/drivers/infiniband/hw/erdma/
erdma_cmdq.c
55 cmdq->wait_pool[comp_wait->ctx_id].cmd_status = ERDMA_CMD_STATUS_INIT;
57 used = __test_and_clear_bit(comp_wait->ctx_id, cmdq->comp_wait_bitmap);
82 cmdq->wait_pool[i].ctx_id = i;
293 comp_wait->ctx_id) |
305 u16 ctx_id; local
320 ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
321 comp_wait = &cmdq->wait_pool[ctx_id];

Completed in 429 milliseconds
