Searched refs:scheduled (Results 1 - 25 of 29) sorted by relevance

/linux-master/include/linux/
posix-timers_types.h
    64  * @work: The task work to be scheduled
    66  * @scheduled: @work has been scheduled already, no further processing
    71  unsigned int scheduled;    (member in struct posix_cputimers_work)
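
The `scheduled` member is a schedule-once guard: the task work is queued only if it is not already pending, and the flag is cleared when the work runs (see the posix-cpu-timers.c hits further down). A minimal userspace sketch of the guard, assuming a hypothetical queue_work() callback rather than the kernel's task-work API:

    #include <stdbool.h>

    struct posix_cputimers_work_sketch {
        bool scheduled;                 /* set when queued, cleared when it runs */
    };

    /* Queue the work only if it is not already pending. */
    static void schedule_once(struct posix_cputimers_work_sketch *w,
                              void (*queue_work)(void))
    {
        if (w->scheduled)               /* already queued: nothing further to do */
            return;
        w->scheduled = true;
        queue_work();
    }

    /* The handler clears the flag so the next expiry can queue work again. */
    static void work_handler(struct posix_cputimers_work_sketch *w)
    {
        w->scheduled = false;
    }
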
/linux-master/net/mptcp/
sched.c
   118  mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
   119                              bool scheduled)    (argument)
   121  WRITE_ONCE(subflow->scheduled, scheduled);
   143  if (READ_ONCE(subflow->scheduled))
   165  if (READ_ONCE(subflow->scheduled))
protocol.h
   518  bool scheduled;
   687  bool scheduled);
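
WRITE_ONCE()/READ_ONCE() let one context set subflow->scheduled while others poll it locklessly, without torn or compiler-reordered accesses. Outside the kernel the closest analog is a relaxed C11 atomic; a minimal sketch with a stand-in subflow type, not the MPTCP structures:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct subflow_sketch {
        atomic_bool scheduled;          /* stand-in for bool + READ_ONCE/WRITE_ONCE */
    };

    /* Analogous to WRITE_ONCE(subflow->scheduled, scheduled). */
    static void subflow_set_scheduled(struct subflow_sketch *sf, bool scheduled)
    {
        atomic_store_explicit(&sf->scheduled, scheduled, memory_order_relaxed);
    }

    /* Analogous to if (READ_ONCE(subflow->scheduled)) ... */
    static bool subflow_is_scheduled(struct subflow_sketch *sf)
    {
        return atomic_load_explicit(&sf->scheduled, memory_order_relaxed);
    }
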
/linux-master/kernel/
workqueue_internal.h
    42  struct list_head scheduled;    /* L: scheduled works */    (member in struct worker)
workqueue.c
   277  * pool->worklist or worker->scheduled. Those work items are only struct ...
   454  /* CPU where unbound work was last round robin scheduled from this CPU */
  1134  * @work: start of series of works to be scheduled
  1139  * scheduled starts at @work and includes any consecutive work with
  1163  * multiple works to the scheduled queue, the next position
  1180  * scheduled work. This allows assign_work() to be nested inside
  1204  move_linked_works(work, &collision->scheduled, nextp);
  1208  move_linked_works(work, &worker->scheduled, nextp);
  2581  * @delay is zero and @dwork is idle, it will be scheduled for immediate
  2613  * zero, @work is guaranteed to be scheduled immediately ...
  (more matches in this file)
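
move_linked_works() at lines 1204/1208 splices a work item, together with any consecutive works linked to it, onto another list such as the worker's scheduled list. A minimal sketch of that chain move over a simplified doubly linked list, with a plain `linked` flag standing in for the kernel's WORK_STRUCT_LINKED bit (and assuming a work with `linked` set always has a successor on the source list):

    #include <stddef.h>

    struct list_head { struct list_head *prev, *next; };

    static void list_move_tail(struct list_head *entry, struct list_head *head)
    {
        entry->prev->next = entry->next;        /* unlink from the old list */
        entry->next->prev = entry->prev;
        entry->prev = head->prev;               /* insert at the tail */
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
    }

    struct work_sketch {
        struct list_head entry;
        int linked;                 /* nonzero: the next work moves along with us */
    };

    /* Move @work and every consecutive linked work onto @scheduled. */
    static void move_linked_works(struct work_sketch *work,
                                  struct list_head *scheduled)
    {
        struct work_sketch *w = work;

        for (;;) {
            struct list_head *next = w->entry.next; /* grab before we unlink */
            int more = w->linked;

            list_move_tail(&w->entry, scheduled);
            if (!more)
                break;
            w = (struct work_sketch *)((char *)next -
                                       offsetof(struct work_sketch, entry));
        }
    }
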
/linux-master/drivers/gpu/drm/scheduler/
sched_fence.c
    66  /* Set the parent before signaling the scheduled fence, such that,
    68  * been scheduled (which is the case for drivers delegating waits
    75  dma_fence_signal(&fence->scheduled);
   141  * Drop the extra reference from the scheduled fence to the base fence.
   147  dma_fence_put(&fence->scheduled);
   197  return container_of(f, struct drm_sched_fence, scheduled);
   228  dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
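
A drm_sched_fence embeds two dma_fences: `scheduled` signals when the job is picked to run on the hardware, `finished` when it completes. The hit at line 197 is the container_of() idiom recovering the enclosing structure from a pointer to its embedded `scheduled` fence; a minimal sketch with stand-in types, not the real dma_fence:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fence_sketch { int seqno; };

    struct sched_fence_sketch {
        struct fence_sketch scheduled;  /* signaled when the job starts running */
        struct fence_sketch finished;   /* signaled when the job completes */
    };

    /* Recover the parent structure from its embedded scheduled fence. */
    static struct sched_fence_sketch *to_sched_fence(struct fence_sketch *f)
    {
        return container_of(f, struct sched_fence_sketch, scheduled);
    }
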
sched_entity.c
   166  * drm_sched_entity_error - return error of last scheduled job
   169  * Opportunistically return the error of the last scheduled job. Result can
   209  if (s_fence && f == &s_fence->scheduled) {
   210  /* The dependencies array had a reference on the scheduled
   219  * had on the scheduled fence.
   221  dma_fence_put(&s_fence->scheduled);
   416  * Fence is a scheduled/finished fence from a job
   430  * it to be scheduled
   432  fence = dma_fence_get(&s_fence->scheduled);
   439  /* Ignore it when it is already scheduled */
  (more matches in this file)
sched_main.c
    28  * into software queues which are then scheduled on a hardware run queue.
    41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
    44  * The jobs in an entity are always scheduled in the order that they were pushed.
   746  guilty_context = s_job->s_fence->scheduled.context;
   749  if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
  1090  /* make the scheduled timestamp more accurate */
  1096  &next->s_fence->scheduled.flags))
  1097  next->s_fence->scheduled.timestamp =
  1380  * jobs from it will not be scheduled further
  1401  if (bad->s_fence->scheduled ...
  (more matches in this file)
/linux-master/drivers/infiniband/hw/hfi1/
rc.h
    53  u8 *prev_ack, bool *scheduled);
/linux-master/net/sctp/
stream_sched_prio.c
    64  /* Look into scheduled priorities first, as they are sorted and
    65  * we can find it fast IF it's scheduled.
   105  bool scheduled = false;    (local)
   111  scheduled = true;
   127  return scheduled;
   137  /* Nothing to do if already scheduled */
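
The comment at lines 64-65 explains why the lookup is cheap: active priorities are kept sorted, so the scan can stop as soon as it walks past the slot where the entry would sit. A minimal sketch of that early-exit scan over a simplified ascending list, not the SCTP structures:

    #include <stdbool.h>
    #include <stddef.h>

    struct prio_sketch { int value; struct prio_sketch *next; }; /* sorted ascending */

    /* Return whether @value is scheduled; the sort order allows early exit. */
    static bool prio_is_scheduled(const struct prio_sketch *head, int value)
    {
        bool scheduled = false;

        for (const struct prio_sketch *p = head; p; p = p->next) {
            if (p->value > value)       /* walked past its slot: not present */
                break;
            if (p->value == value) {
                scheduled = true;       /* found it fast, as the comment says */
                break;
            }
        }
        return scheduled;
    }
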
/linux-master/arch/s390/pci/
pci_irq.c
   179  atomic_t scheduled;    (member in struct cpu_irq_data)
   185  atomic_t *scheduled = data;    (local)
   189  } while (atomic_dec_return(scheduled));
   212  if (atomic_inc_return(&cpu_data->scheduled) > 1)
   215  INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled);
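
Here the atomic_t coalesces cross-CPU IRQ work: each event increments `scheduled`, but only the 0 -> 1 transition actually dispatches the remote handler; the handler then loops, decrementing until the counter drains to zero. A minimal sketch of that counting scheme with C11 atomics, assuming a hypothetical dispatch() in place of the CSD machinery:

    #include <stdatomic.h>

    static atomic_int scheduled;

    /* Handler: drain one event per count until the counter reaches zero. */
    static void handle_remote_irq(void)
    {
        do {
            /* ... process one pending event ... */
        } while (atomic_fetch_sub(&scheduled, 1) - 1); /* like atomic_dec_return() */
    }

    /* Producer: only the 0 -> 1 transition needs to dispatch the handler. */
    static void raise_irq(void (*dispatch)(void))
    {
        if (atomic_fetch_add(&scheduled, 1) + 1 > 1)   /* like atomic_inc_return() */
            return;                     /* a handler is already scheduled */
        dispatch();                     /* stand-in for firing the CSD */
    }
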
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
   200  struct dma_fence *fence = &leader->base.s_fence->scheduled;
   220  if (job->gang_submit != &job->base.s_fence->scheduled)
   334  /* Signal all jobs not yet scheduled */
   342  dma_fence_signal(&s_fence->scheduled);
   350  /* Signal all jobs already scheduled to HW */
amdgpu_sync.c
   297  * when they are scheduled.
   300  if (dma_fence_is_signaled(&s_fence->scheduled))
   303  return &s_fence->scheduled;
amdgpu_ib.c
   154  job->base.s_fence->scheduled.context : 0;
   385  /* for MM engines in hypervisor side they are not scheduled together
   395  /* for CP & SDMA engines since they are scheduled together so
amdgpu_ctx.c
   177  /* When the fence is not even scheduled it can't have spent time */
   179  if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
   184  return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
   187  s_fence->scheduled.timestamp);
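
These amdgpu_ctx.c hits compute how long a job has run from the scheduled fence's timestamp: zero if it never started, now minus start while it is still running, and finish minus start once done. A minimal sketch of that logic with plain clock_gettime() timestamps instead of dma_fence flags and ktime:

    #include <stdbool.h>
    #include <time.h>

    struct job_times_sketch {
        bool started, finished;
        double start, end;              /* seconds, valid when the flags are set */
    };

    static double now_seconds(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
    }

    /* Time the job has spent since it was scheduled to the hardware. */
    static double job_runtime(const struct job_times_sketch *t)
    {
        if (!t->started)                /* never scheduled: no time spent */
            return 0.0;
        if (!t->finished)               /* still running */
            return now_seconds() - t->start;
        return t->end - t->start;
    }
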
amdgpu_cs.c
   422  fence = dma_fence_get(&s_fence->scheduled);
  1274  fence = &p->jobs[i]->base.s_fence->scheduled;
/linux-master/kernel/time/
posix-cpu-timers.c
  1225  p->posix_cputimers_work.scheduled = false;
  1238  * Note: All operations on tsk->posix_cputimers_work.scheduled happen either
  1245  return tsk->posix_cputimers_work.scheduled;
  1250  if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
  1254  tsk->posix_cputimers_work.scheduled = true;
  1269  tsk->posix_cputimers_work.scheduled = false;
  1290  tsk->posix_cputimers_work.scheduled = false;
  1442  * work is already scheduled there is no point to do anything here.
/linux-master/include/drm/
gpu_scheduler.h
   102  * Runqueue on which this entity is currently scheduled.
   114  * be scheduled on any scheduler on this list.
   170  * &drm_sched_fence.scheduled uses the fence_context but
   199  * Points to the finished fence of the last scheduled job. Only written
   244  * struct drm_sched_rq - queue of entities to be scheduled.
   248  * @entities: list of the entities to be scheduled.
   249  * @current_entity: the entity which is to be scheduled.
   269  * @scheduled: this fence is what will be signaled by the scheduler
   270  * when the job is scheduled.
   272  struct dma_fence scheduled;    (member in struct drm_sched_fence)
  (more matches in this file)
/linux-master/drivers/gpu/drm/imagination/
pvr_sync.c
   233  /* If this is a native dependency, we wait for the scheduled fence,
   237  dma_fence_get(&s_fence->scheduled));
pvr_job.c
   621  if (&geom_job->base.s_fence->scheduled == fence)
   640  return dma_fence_get(&job->base.s_fence->scheduled);
   643  /* If we didn't find any, we just return the last queued job scheduled
pvr_queue.c
   477  if (f == &job->base.s_fence->scheduled)
   625  &job->paired_job->base.s_fence->scheduled == fence)
  1153  /* Keep track of the last queued job scheduled fence for combined submit. */
  1155  queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled);
/linux-master/drivers/scsi/libsas/
sas_scsi_host.c
   392  int scheduled = 0, tries = 100;    (local)
   400  while (!scheduled && tries--) {
   404  scheduled = 1;
   414  if (scheduled)
   735  /* check if any new eh work was scheduled during the last run */
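
The sas_scsi_host.c hits at 392-414 outline a bounded retry loop: keep attempting to schedule until it succeeds or the try budget (100 here) is exhausted, then branch on the outcome. A minimal sketch of that shape, with a hypothetical try_schedule() standing in for the driver's queueing attempt:

    #include <stdbool.h>

    static int attempts;

    /* Stand-in for the real attempt; here it succeeds on the third try. */
    static bool try_schedule(void)
    {
        return ++attempts >= 3;
    }

    static bool schedule_with_retries(void)
    {
        int scheduled = 0, tries = 100;     /* bounded retry budget */

        while (!scheduled && tries--) {
            if (try_schedule())
                scheduled = 1;
        }
        return scheduled;                   /* caller branches on this, as at 414 */
    }
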
/linux-master/drivers/rtc/
interface.c
   412  time64_t now, scheduled;    (local)
   419  scheduled = rtc_tm_to_time64(&alarm->time);
   427  if (scheduled <= now)
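
Here `scheduled` is the alarm's broken-down time converted to seconds, and an alarm that is not strictly in the future is rejected. A minimal userspace sketch with standard mktime()/time() in place of the RTC helpers (the -ETIME return mirrors the kernel convention for an expired alarm):

    #include <errno.h>
    #include <time.h>

    /* Reject alarms that are not strictly in the future, as the RTC core does. */
    static int check_alarm(struct tm *alarm_tm)
    {
        time_t now = time(NULL);
        time_t scheduled = mktime(alarm_tm);    /* like rtc_tm_to_time64() */

        if (scheduled <= now)
            return -ETIME;                      /* alarm time already passed */
        return 0;
    }
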
/linux-master/kernel/locking/
lockdep.c
   392  * @scheduled: Whether or not an RCU callback has been scheduled.
   398  int scheduled;    (member in struct delayed_free)
  6197  if (delayed_free.scheduled)
  6200  delayed_free.scheduled = true;
  6241  delayed_free.scheduled = false;
/linux-master/arch/x86/crypto/
sha512-ssse3-asm.S
   256  movdqa %xmm0, W_t(\rnd)    # Store scheduled qwords

