Searched refs:intr (Results 51 - 75 of 528) sorted by path


/linux-master/drivers/crypto/marvell/octeontx/
otx_cptvf_main.c
497 u64 intr; local
499 intr = cptvf_read_vf_misc_intr_status(cptvf);
501 if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
503 intr, cptvf->vfid);
506 } else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
512 intr, cptvf->vfid);
513 } else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
517 intr, cptvf->vfid);
518 } else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
522 intr, cptv
572 u32 intr = cptvf_read_vq_done_count(cptvf); local
(additional matches in this file elided)
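The otx_cptvf_main.c hits show the usual shape of a VF misc-interrupt handler: latch the cause register once, then branch per bit, with likely()/unlikely() marking the mailbox case as the hot path. A minimal sketch of that shape; the my_cptvf struct, read_misc_status() stub, and mask values below are illustrative stand-ins, not the driver's real definitions:

#include <linux/interrupt.h>
#include <linux/bits.h>
#include <linux/device.h>

#define VF_INTR_MBOX BIT_ULL(0)	/* illustrative; driver uses OTX_CPT_VF_INTR_MBOX_MASK */
#define VF_INTR_DOVF BIT_ULL(1)	/* illustrative doorbell-overflow bit */

struct my_cptvf {			/* hypothetical, stands in for struct otx_cptvf */
	struct device *dev;
	int vfid;
};

static u64 read_misc_status(struct my_cptvf *vf)
{
	return 0;			/* stub for cptvf_read_vf_misc_intr_status() */
}

static irqreturn_t my_vf_misc_intr(int irq, void *arg)
{
	struct my_cptvf *vf = arg;
	u64 intr = read_misc_status(vf);	/* read the cause register once */

	if (likely(intr & VF_INTR_MBOX)) {
		/* hot path: mailbox message from the PF */
	} else if (unlikely(intr & VF_INTR_DOVF)) {
		dev_err(vf->dev, "doorbell overflow 0x%llx on VF%d\n",
			intr, vf->vfid);
	}
	return IRQ_HANDLED;
}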
/linux-master/drivers/crypto/marvell/octeontx2/
otx2_cptpf_main.c
180 u64 intr; local
186 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
188 if (!intr)
192 if (!(intr & BIT_ULL(vf)))
212 u64 intr; local
218 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
220 if (!intr)
223 if (!(intr & BIT_ULL(vf)))
otx2_cptpf_mbox.c
350 u64 intr; local
358 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
363 if (intr & (1ULL << vf->intr_idx)) {
428 u64 intr; local
431 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
433 if (intr & 0x1ULL) {
otx2_cptvf_mbox.c
53 u64 intr; local
56 intr = otx2_cpt_read64(cptvf->reg_base, BLKADDR_RVUM, 0,
59 if (intr & 0x1ULL) {
/linux-master/drivers/crypto/
n2_core.c
1482 * So we have to back-translate, going through the 'intr' and 'ino'
1491 unsigned int intr; local
1501 intr = ip->ino_table[i].intr;
1508 if (dev_intrs[i] == intr)
1767 b->intr = i + 1;
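The n2_core.c comment explains that the driver must map a device interrupt back to its table slot by walking ino_table and comparing each entry's intr value against the device's interrupt list. A generic sketch of that back-translation; the struct and names are illustrative, not n2_core's actual types:

#include <linux/types.h>

struct ino_entry {			/* illustrative, mirrors ip->ino_table[] */
	unsigned int intr;
	unsigned int ino;
};

/*
 * Translate a table index back to a device-interrupt index: find the
 * slot of dev_intrs[] whose value matches table[idx].intr, or -1.
 */
static int back_translate(const struct ino_entry *table, unsigned int idx,
			  const unsigned int *dev_intrs, unsigned int n)
{
	unsigned int intr = table[idx].intr;
	unsigned int i;

	for (i = 0; i < n; i++)
		if (dev_intrs[i] == intr)
			return i;
	return -1;
}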
/linux-master/drivers/dma-buf/
dma-fence.c
486 * @intr: if true, do an interruptible wait
501 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) argument
516 ret = fence->ops->wait(fence, intr, timeout);
518 ret = dma_fence_default_wait(fence, intr, timeout);
752 * @intr: if true, do an interruptible wait
761 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) argument
772 if (intr && signal_pending(current)) {
787 if (intr)
796 if (ret > 0 && intr && signal_pending(current))
832 * @intr
848 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, bool intr, signed long timeout, uint32_t *idx) argument
(additional matches in this file elided)
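dma-fence.c is where the intr flag gets its core meaning: true asks for an interruptible wait. Per the kerneldoc, dma_fence_wait_timeout() then returns a negative error (notably -ERESTARTSYS on a signal), 0 on timeout, or the remaining jiffies on success. A typical caller, sketched under those documented semantics with an arbitrary 100ms budget:

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Wait up to 100ms for @fence; returns 0, -ETIME, or -ERESTARTSYS. */
static int wait_for_fence(struct dma_fence *fence)
{
	signed long ret;

	ret = dma_fence_wait_timeout(fence, true /* intr */,
				     msecs_to_jiffies(100));
	if (ret == 0)
		return -ETIME;		/* timed out, fence never signaled */
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS: signal arrived */
	return 0;			/* signaled; ret held the jiffies left */
}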
dma-resv.c
661 * @intr: if true, do interruptible wait
671 bool intr, unsigned long timeout)
680 ret = dma_fence_wait_timeout(fence, intr, ret);
670 dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, bool intr, unsigned long timeout) argument
st-dma-fence.c
46 static long mock_wait(struct dma_fence *f, bool intr, long timeout) argument
48 const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
/linux-master/drivers/dma/
fsl-edma-main.c
36 unsigned int intr, ch; local
39 intr = edma_readl(fsl_edma, regs->intl);
40 if (!intr)
44 if (intr & (0x1 << ch)) {
55 unsigned int intr; local
57 intr = edma_readl_chreg(fsl_chan, ch_int);
58 if (!intr)
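The fsl-edma-main.c handler is the classic multi-channel dispatch: read the aggregate interrupt latch once, return IRQ_NONE if it is clear, then service every channel whose bit is set. Sketched generically below; the register accessor, channel count, and service routine are placeholders, not the driver's API:

#include <linux/interrupt.h>

#define NR_CHANS 32			/* illustrative channel count */

static unsigned int read_int_latch(void *base)
{
	return 0;			/* stub for edma_readl(fsl_edma, regs->intl) */
}

static void service_chan(void *base, unsigned int ch) { }

static irqreturn_t my_edma_tx_irq(int irq, void *dev_id)
{
	void *base = dev_id;
	unsigned int intr, ch;

	intr = read_int_latch(base);
	if (!intr)
		return IRQ_NONE;	/* latch clear: not our interrupt */

	for (ch = 0; ch < NR_CHANS; ch++)
		if (intr & (0x1 << ch))	/* this channel raised the IRQ */
			service_chan(base, ch);

	return IRQ_HANDLED;
}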
fsl-qdma.c
740 unsigned int intr; local
748 intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
750 if (intr) {
757 intr, decfdw0r, decfdw1r, decfdw2r, decfdw3r);
767 unsigned int intr, reg; local
781 intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
783 if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
784 intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
786 if (intr != 0) {
/linux-master/drivers/extcon/
extcon-fsa9480.c
247 int intr = 0; local
250 fsa9480_read_irq(usbsw, &intr);
251 if (!intr)
/linux-master/drivers/gpio/
gpio-xgs-iproc.c
35 void __iomem *intr; member in struct:iproc_gpio_chip
175 int_status = readl_relaxed(chip->intr + IPROC_CCA_INT_STS);
255 chip->intr = devm_platform_ioremap_resource(pdev, 1);
256 if (IS_ERR(chip->intr))
257 return PTR_ERR(chip->intr);
260 val = readl_relaxed(chip->intr + IPROC_CCA_INT_MASK);
262 writel_relaxed(val, chip->intr + IPROC_CCA_INT_MASK);
298 if (chip->intr) {
301 val = readl_relaxed(chip->intr + IPROC_CCA_INT_MASK);
303 writel_relaxed(val, chip->intr
(additional matches in this file elided)
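gpio-xgs-iproc.c keeps a second register window in chip->intr, mapped from the platform device's second MEM resource, and does read-modify-write updates of an interrupt-mask register through it. The same two steps in isolation; the mask offset and bit below are illustrative, not the driver's IPROC_CCA_* values:

#include <linux/io.h>
#include <linux/err.h>
#include <linux/bits.h>
#include <linux/platform_device.h>

#define MY_INT_MASK 0x24		/* illustrative offset for the mask register */

static void mask_irq_bit(void __iomem *intr, u32 bit)
{
	u32 val = readl_relaxed(intr + MY_INT_MASK);

	val &= ~bit;			/* read-modify-write the mask */
	writel_relaxed(val, intr + MY_INT_MASK);
}

static int my_probe(struct platform_device *pdev)
{
	void __iomem *intr;

	/* Resource index 1: the interrupt register window. */
	intr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(intr))
		return PTR_ERR(intr);

	mask_irq_bit(intr, BIT(3));	/* e.g. mask one line at probe */
	return 0;
}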
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_amdkfd.h
318 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
amdgpu_amdkfd_gpuvm.c
1225 * @intr: Whether the wait is interruptible
1232 bool wait, bool intr)
1237 ret = amdgpu_sync_wait(ctx->sync, intr);
2175 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
2186 ret = amdgpu_sync_wait(&sync, intr);
1231 unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, bool wait, bool intr) argument
2174 amdgpu_amdkfd_gpuvm_sync_memory( struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) argument
amdgpu_object.c
1474 * @intr: Whether the wait is interruptible
1483 bool intr)
1490 r = amdgpu_sync_wait(&sync, intr);
1499 * @intr: Whether the wait is interruptible
1505 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1510 AMDGPU_SYNC_NE_OWNER, owner, intr);
1468 amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, enum amdgpu_sync_mode sync_mode, void *owner, bool intr) argument
1492 amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) argument
amdgpu_object.h
340 bool intr);
341 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
amdgpu_sync.c
403 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr) argument
410 r = dma_fence_wait(e->fence, intr);
amdgpu_sync.h
59 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
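Across these amdgpu files, intr is simply plumbed through: the bo/sync wrappers collect fences and amdgpu_sync_wait() forwards the flag to dma_fence_wait() for each one, so a single bool decides whether the whole chain is interruptible. The essence of that forwarding loop, sketched over a hypothetical fence list rather than amdgpu's hashed container:

#include <linux/dma-fence.h>
#include <linux/list.h>

struct sync_entry {			/* illustrative container entry */
	struct list_head node;
	struct dma_fence *fence;
};

/* Wait on every collected fence; @intr makes each wait interruptible. */
static int sync_wait_all(struct list_head *fences, bool intr)
{
	struct sync_entry *e;
	int r;

	list_for_each_entry(e, fences, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;	/* e.g. -ERESTARTSYS if interrupted */
	}
	return 0;
}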
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_svm.c
1478 bool intr; member in struct:svm_validate_context
1483 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr) argument
1490 drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0);
1577 bool intr, bool wait, bool flush_tlb)
1591 ctx->intr = intr;
1644 r = svm_range_reserve_bos(ctx, intr);
1574 svm_range_validate_and_map(struct mm_struct *mm, unsigned long map_start, unsigned long map_last, struct svm_range *prange, int32_t gpuidx, bool intr, bool wait, bool flush_tlb) argument
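kfd_svm.c shows the drm_exec flavor of the same switch: the caller's intr flag selects DRM_EXEC_INTERRUPTIBLE_WAIT at drm_exec_init() time, so every lock taken inside the retry loop inherits interruptibility. A minimal sketch of that reservation loop over one hypothetical GEM object:

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

static int reserve_obj(struct drm_gem_object *obj, bool intr)
{
	struct drm_exec exec;
	int r = 0;

	drm_exec_init(&exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec, obj);	/* interruptible iff intr */
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto out;
	}
out:
	drm_exec_fini(&exec);
	return r;
}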
/linux-master/drivers/gpu/drm/
drm_suballoc.c
301 * @intr: Whether to perform waits interruptible. This should typically
314 gfp_t gfp, bool intr, size_t align)
363 t = dma_fence_wait_any_timeout(fences, count, intr,
371 } else if (intr) {
313 drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size, gfp_t gfp, bool intr, size_t align) argument
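drm_suballoc.c relies on the any-of variant: dma_fence_wait_any_timeout() blocks until one fence in an array signals, with intr again selecting interruptibility and *idx reporting which fence fired. A usage sketch, assuming the caller holds references on all @count fences:

#include <linux/dma-fence.h>
#include <linux/sched.h>

/* Wait for the first of @count fences; returns its index or a negative errno. */
static int wait_any(struct dma_fence **fences, uint32_t count, bool intr)
{
	uint32_t idx;
	signed long t;

	t = dma_fence_wait_any_timeout(fences, count, intr,
				       MAX_SCHEDULE_TIMEOUT, &idx);
	if (t < 0)
		return t;		/* -ERESTARTSYS possible when intr */
	return idx;			/* fences[idx] signaled first */
}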
/linux-master/drivers/gpu/drm/etnaviv/
etnaviv_gpu.c
1537 u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE); local
1539 if (intr != 0) {
1544 dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1546 if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
1548 intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
1551 if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
1555 intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
1558 while ((event = ffs(intr)) != 0) {
1563 intr
(additional matches in this file elided)
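etnaviv_gpu.c drains its event bits with the ffs() peel loop: ack the status, mask off the error bits handled above, then pop events off the pending word lowest-bit-first until it is empty. The loop in isolation, with event handling stubbed:

#include <linux/bitops.h>
#include <linux/types.h>

static void handle_event(unsigned int event) { }

/* Dispatch every event bit set in @intr, lowest first. */
static void drain_events(u32 intr)
{
	unsigned int event;

	while ((event = ffs(intr)) != 0) {
		event -= 1;		/* ffs() is 1-based */
		intr &= ~(1 << event);	/* clear before handling, as the driver does */
		handle_event(event);
	}
}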
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_object.c
922 * @intr: Whether to wait interruptible.
932 bool intr)
939 intr, MAX_SCHEDULE_TIMEOUT);
931 i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, bool intr) argument
i915_gem_object.h
165 bool intr)
169 if (intr)
192 return __i915_gem_object_lock(obj, ww, ww && ww->intr);
198 WARN_ON(ww && !ww->intr);
759 bool intr);
163 __i915_gem_object_lock(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww, bool intr) argument
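The i915_gem_object.h inline reduces intr to a choice of lock primitive: dma_resv_lock_interruptible() for waits a signal may abort, plain dma_resv_lock() otherwise. The core of that selection, sketched without i915's ww-context bookkeeping:

#include <linux/dma-resv.h>

/* Lock @resv, interruptibly when @intr; returns 0 or a negative errno. */
static int lock_resv(struct dma_resv *resv, struct ww_acquire_ctx *ww,
		     bool intr)
{
	if (intr)
		return dma_resv_lock_interruptible(resv, ww);	/* may fail on a signal */
	return dma_resv_lock(resv, ww);		/* only deadlock backoff can fail */
}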
i915_gem_ttm_move.c
696 * @intr: Whether to perform waits interruptible:
701 * Return: Zero on success. Negative error code on error. If @intr == true,
706 bool allow_accel, bool intr)
711 .interruptible = intr,
704 i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, struct drm_i915_gem_object *src, bool allow_accel, bool intr) argument
i915_gem_ttm_move.h
29 bool allow_accel, bool intr);

