Searched refs:queued (Results 1 - 25 of 109) sorted by last modified time


/linux-master/drivers/spi/
spi.c
2077 * spi_get_next_queued_message() - called by driver to check for queued
2079 * @ctlr: the controller to check for queued messages
2289 * spi_queued_transfer - transfer function for queued transfers
2291 * @msg: SPI message which is to handled is queued to driver queue
2314 ctlr->queued = true;
2336 * memory operations do not preempt regular SPI transfers that have been queued
3359 * If we're using a queued driver, start the queue. Note that we don't
3474 if (ctlr->queued) {
3525 /* Basically no-ops for non-queued controllers */
3526 if (ctlr->queued) {
[all...]
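
A minimal sketch of the queued-controller shape these spi.c hits trace: a submit path appends a message to a FIFO under a lock and marks the queue live (cf. spi_queued_transfer() setting ctlr->queued), and a worker later pops the head (cf. spi_get_next_queued_message()). All names below (msg_queue, msg_submit, msg_next) are hypothetical stand-ins, not the SPI API.

    #include <pthread.h>
    #include <stddef.h>

    struct msg { struct msg *next; };

    struct msg_queue {
        pthread_mutex_t lock;
        struct msg *head, **tail;
        int queued;                   /* queue in use, cf. ctlr->queued */
    };

    static void msg_queue_init(struct msg_queue *q)
    {
        pthread_mutex_init(&q->lock, NULL);
        q->head = NULL;
        q->tail = &q->head;
        q->queued = 0;
    }

    static void msg_submit(struct msg_queue *q, struct msg *m)
    {
        pthread_mutex_lock(&q->lock);
        m->next = NULL;
        *q->tail = m;                 /* append, preserving FIFO order */
        q->tail = &m->next;
        q->queued = 1;
        pthread_mutex_unlock(&q->lock);
    }

    static struct msg *msg_next(struct msg_queue *q)
    {
        struct msg *m;

        pthread_mutex_lock(&q->lock);
        m = q->head;                  /* NULL means nothing is queued */
        if (m) {
            q->head = m->next;
            if (!q->head)
                q->tail = &q->head;
        }
        pthread_mutex_unlock(&q->lock);
        return m;
    }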
/linux-master/kernel/sched/
fair.c
5503 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) argument
5518 * queued ticks are scheduled to match the slice, so don't bother
5521 if (queued) {
6032 /* Already queued for async unthrottle */
6445 * exist throttled cfs_rq(s), and the period_timer must have queued the
7553 * The assumption is that the wakee queued work for the
12610 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) argument
12617 entity_tick(cfs_rq, se, queued);
sched.h
439 * are queued on this cfs_rq, so a weight of a entity should not be
697 int curr; /* highest queued rt task prio */
1446 /* runqueue on which this entity is (to be) queued */
1811 * Don't (re)queue an already queued item; nor queue anything when
2297 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
core.c
954 * it's already queued (either by us or someone else) and will get the
1005 * queued for wakeup.
1757 * Lock the task and the rq where the task is (or was) queued.
2293 int running, queued, match; local
2332 queued = task_on_rq_queued(p);
2337 * still queued so it will wait.
2340 queued = 1;
2371 if (unlikely(queued)) {
2516 * move_queued_task - move a queued task to new rq.
2757 bool queued, running; local
7152 int prio, oldprio, queued, running, queue_flag = local
7276 bool queued, running; local
7709 int retval, oldprio, newprio, queued, running; local
9392 bool queued, running; local
10485 int queued, running, queue_flags = local
[all...]
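
The core.c hits (2332, 2340, 2371, and the repeated queued/running locals) all follow one pattern: snapshot whether the task is on a runqueue, dequeue it, change its attributes, and re-enqueue only if it was queued. A hedged sketch of that shape, with stand-in names rather than the scheduler API:

    #include <stdbool.h>

    struct task { bool on_rq; int prio; };
    struct rq { int nr_running; };

    static bool task_is_queued(struct task *p) { return p->on_rq; }
    static void rq_dequeue(struct rq *rq, struct task *p) { rq->nr_running--; p->on_rq = false; }
    static void rq_enqueue(struct rq *rq, struct task *p) { rq->nr_running++; p->on_rq = true; }

    static void change_prio(struct rq *rq, struct task *p, int prio)
    {
        bool queued = task_is_queued(p);

        if (queued)
            rq_dequeue(rq, p);    /* take it off the queue while we edit it */

        p->prio = prio;           /* not visible to the picker at this point */

        if (queued)
            rq_enqueue(rq, p);    /* reinsert so ordering reflects the new prio */
    }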
/linux-master/net/mac80211/
tx.c
499 * been queued to pending queue. No reordering can happen, go
531 * We queued up some frames, so the TIM bit might
1111 bool queued = false; local
1149 queued = true;
1153 "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
1173 return queued;
1259 bool queued; local
1261 queued = ieee80211_tx_prep_agg(tx, skb, info,
1264 if (unlikely(queued))
1743 * Returns false if the frame couldn't be transmitted but was queued instead.
[all...]
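
The tx.c hits show a bool queued used as an early-out: a prep step may absorb the frame into a pending queue and reports that, so the caller stops instead of transmitting. A small sketch under that assumption, with illustrative names only:

    #include <stdbool.h>
    #include <stdio.h>

    static bool prep_frame(int frame, bool agg_session_starting)
    {
        if (agg_session_starting) {
            printf("frame %d buffered until the session is up\n", frame);
            return true;          /* queued: caller must not send it now */
        }
        return false;             /* not queued: continue down the tx path */
    }

    static void tx(int frame, bool agg_starting)
    {
        bool queued = prep_frame(frame, agg_starting);
        if (queued)
            return;               /* ownership passed to the pending queue */
        printf("transmit frame %d\n", frame);
    }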
/linux-master/fs/nfsd/
nfs4callback.c
1242 * task was queued, we need to resubmit it after setting up
1547 bool queued; local
1550 queued = nfsd4_queue_cb(cb);
1551 if (!queued)
1553 return queued;
/linux-master/drivers/net/
tun.c
2571 int flush = 0, queued = 0; local
2582 queued += ret;
2588 if (tfile->napi_enabled && queued > 0)
/linux-master/drivers/vhost/
vhost.c
252 bool queued = false; local
257 queued = true;
262 return queued;
678 * Take the worker mutex to make sure we see the work queued from
688 * not be any works queued for scsi and net.
707 /* Make sure whatever was queued gets run */
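
The vhost hits (and the nfsd and fuse ones elsewhere in this list) share a "report whether we queued" idiom: queue the item only if it is not already pending, and return that decision so the caller can kick a worker only when needed. A minimal sketch, not the vhost API:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct work {
        atomic_flag pending;          /* set while the item sits on the queue */
    };

    static bool work_queue(struct work *w)
    {
        bool queued = false;

        if (!atomic_flag_test_and_set(&w->pending)) {
            /* first queuer wins; enqueue the item and wake the worker here */
            queued = true;
        }
        return queued;                /* false: it was already queued */
    }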
/linux-master/io_uring/
io_uring.c
837 unsigned int free, queued, len; local
848 queued = min(__io_cqring_events(ctx), ctx->cq_entries);
849 free = ctx->cq_entries - queued;
1175 * Run queued task_work, returning the number of entries processed in *count.
1304 * they can even be queued lazily, fall back to non-lazy.
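
The io_uring hit at 848-849 is capacity arithmetic: free CQ slots are the ring size minus what is already queued, with queued clamped to the ring size so the subtraction cannot underflow. A tiny runnable sketch:

    #include <stdio.h>

    static unsigned int cq_free(unsigned int events, unsigned int cq_entries)
    {
        unsigned int queued = events < cq_entries ? events : cq_entries;
        return cq_entries - queued;   /* 0 when the ring is already full */
    }

    int main(void)
    {
        printf("%u\n", cq_free(3, 8));   /* 5 slots free */
        printf("%u\n", cq_free(9, 8));   /* 0: ring full */
        return 0;
    }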
/linux-master/fs/
aio.c
1877 bool queued; member in struct:aio_poll_table
1888 if (unlikely(pt->queued)) {
1893 pt->queued = true;
1925 apt.queued = false;
1934 if (likely(apt.queued)) {
2077 * the number of iocbs queued. May return -EINVAL if the aio_context
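
The aio poll hits use queued as a one-shot registration guard: the first call of the queue callback arms a wait entry, a second call is flagged as an error, and the caller then checks queued to see whether arming succeeded. A sketch with stand-in names:

    #include <stdbool.h>

    struct poll_table {
        bool queued;
        int error;
    };

    static void queue_proc(struct poll_table *pt /*, waitqueue, entry */)
    {
        if (pt->queued) {
            pt->error = -1;       /* multiple wait queues are not supported */
            return;
        }
        pt->queued = true;        /* arm exactly one wait entry */
    }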
/linux-master/drivers/scsi/qla2xxx/
qla_target.c
266 uint8_t queued = 0; local
292 if (!queued) {
293 queued = 1;
/linux-master/block/
blk-mq.c
2002 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued, argument
2005 if (hctx->queue->mq_ops->commit_rqs && queued) {
2006 trace_block_unplug(hctx->queue, queued, !from_schedule);
2020 int queued; local
2031 queued = 0;
2048 * once the request is queued to lld, no need to cover the
2056 queued++;
2085 blk_mq_commit_rqs(hctx, queued, false);
2170 * BLK_MQ_CPU_WORK_BATCH queued items.
2687 int queued local
2824 int queued = 0; local
[all...]
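
The blk-mq hits around blk_mq_commit_rqs() show a batch-commit shape: the dispatch loop counts requests the driver actually accepted and rings the doorbell once at the end, only if that count is non-zero. A hedged sketch (driver_queue_rq and driver_commit are invented stand-ins, not mq_ops):

    #include <stdbool.h>

    static bool driver_queue_rq(int rq) { (void)rq; return true; }  /* pretend success */
    static void driver_commit(int queued) { (void)queued; /* ring doorbell once */ }

    static void dispatch(const int *rqs, int n)
    {
        int queued = 0;

        for (int i = 0; i < n; i++)
            if (driver_queue_rq(rqs[i]))
                queued++;          /* count only what the driver accepted */

        if (queued)
            driver_commit(queued); /* skip the doorbell if nothing went out */
    }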
mq-deadline.c
289 /* Number of requests queued for a given priority level. */
529 * For a zoned block device, if we only have writes queued and none of
670 uint32_t queued; local
676 queued = dd_queued(dd, prio);
679 WARN_ONCE(queued != 0,
912 * dispatching requests if all the queued requests are write requests directed
1237 {"queued", 0400, dd_queued_show},
/linux-master/drivers/iommu/
dma-iommu.c
265 /* The IOVAs will be torn down separately, so just free our queued pages */
817 else if (gather && gather->queued)
839 iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
844 if (!iotlb_gather.queued)
/linux-master/net/rds/
send.c
131 * - queued acks can be delayed behind large messages
133 * - small message latency is higher behind queued large messages
153 * sendmsg calls here after having queued its message on the send
435 * not try and send their newly queued message. We need to check the
678 * Transports call here when they've determined that the receiver queued
813 * we only want this to fire once so we use the callers 'queued'. It's
820 __be16 dport, int *queued)
825 if (*queued)
872 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
876 *queued = 1;
817 rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn, struct rds_conn_path *cp, struct rds_message *rm, __be16 sport, __be16 dport, int *queued) argument
1110 int queued = 0, allocated_mr = 0; local
[all...]
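
The rds hits at 813-825 describe a caller-owned fire-once flag: rds_send_queue_rm() takes int *queued and becomes a no-op once it is set, so a retry loop can call it repeatedly while queueing and accounting happen only once. A sketch of that contract (try_queue is hypothetical):

    static int try_queue(int msg, int *queued)
    {
        (void)msg;
        if (*queued)
            return 1;             /* already done on a previous pass */
        /* ... reserve send space; on failure return 0 without touching *queued ... */
        *queued = 1;              /* fires once; later calls are no-ops */
        return 1;
    }

    /* Typical use: int queued = 0; while (!try_queue(msg, &queued)) wait_for_space(); */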
/linux-master/drivers/tty/serial/
amba-pl011.c
248 bool queued; member in struct:pl011_dmatx_data
543 if (uap->dmatx.queued)
557 * a TX buffer completing, we must update the tx queued status to
562 uap->dmatx.queued = false;
581 * 1 if we queued up a TX DMA buffer.
602 uap->dmatx.queued = false;
635 uap->dmatx.queued = false;
644 uap->dmatx.queued = false;
665 uap->dmatx.queued = true;
685 * true if we queued up a TX DMA buffer.
[all...]
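
The pl011 hits track a single in-flight TX DMA buffer with dmatx.queued: set when a descriptor is handed to the DMA engine, cleared in the completion and on every failure path, and checked before starting another transfer. A sketch under those assumptions, with illustrative names:

    #include <stdbool.h>

    struct dmatx { bool queued; };

    static bool dma_tx_start(struct dmatx *d)
    {
        if (d->queued)
            return true;          /* one buffer in flight at a time */
        /* ... map the buffer and submit a descriptor; on error return false
           with d->queued left clear ... */
        d->queued = true;         /* in flight until the completion runs */
        return true;
    }

    static void dma_tx_complete(struct dmatx *d)
    {
        d->queued = false;        /* buffer done; the next write may queue again */
    }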
/linux-master/drivers/tty/hvc/
hvc_iucv.c
461 int queued; local
470 queued = hvc_iucv_queue(priv, buf, count);
473 return queued;
547 sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
/linux-master/fs/f2fs/
segment.c
959 dc->queued = 0;
1080 atomic_sub(dc->queued, &dcc->queued_discard);
1235 dc->queued++;
1344 dc->queued++;
f2fs.h
314 ktime_t queue_time; /* request queued time */
323 atomic_t queued_ckpt; /* # of queued ckpts */
370 unsigned char queued; /* queued discard */ member in struct:discard_cmd
426 atomic_t queued_discard; /* # of queued discard */
1039 atomic_t queued_flush; /* # of queued flushes */
/linux-master/virt/kvm/
async_pf.c
112 * need to be flushed (but sanity check that the work wasn't queued).
161 vcpu->async_pf.queued = 0;
181 vcpu->async_pf.queued--;
195 if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
219 vcpu->async_pf.queued++;
250 vcpu->async_pf.queued++;
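
The async_pf hits bound outstanding work per vcpu: refuse new items at a cap, decrement as items complete, and reset to zero on flush. A sketch of that counter (the cap value here is illustrative, not necessarily the kernel's):

    #include <stdbool.h>

    #define ASYNC_PF_PER_VCPU 64      /* assumed cap, for illustration */

    struct vcpu_apf { unsigned int queued; };

    static bool apf_add(struct vcpu_apf *apf)
    {
        if (apf->queued >= ASYNC_PF_PER_VCPU)
            return false;             /* too much outstanding work; fall back */
        apf->queued++;
        return true;
    }

    static void apf_complete(struct vcpu_apf *apf) { apf->queued--; }
    static void apf_flush(struct vcpu_apf *apf)    { apf->queued = 0; }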
/linux-master/include/linux/
kvm_host.h
366 u32 queued; member in struct:kvm_vcpu::__anon682
iommu.h
330 * @queued: Indicates that the flush will be queued
336 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
344 bool queued; member in struct:iommu_iotlb_gather
613 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
933 return gather && gather->queued;
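
The iommu.h documentation above explains @queued: it is sampled once when the unmap begins, and when set, the sync step skips hardware TLB invalidation because a flush queue will batch it later. A sketch of that switch, with stand-in names rather than the iommu_iotlb_gather API:

    #include <stdbool.h>

    struct gather { bool queued; };

    static void unmap_begin(struct gather *g, bool fq_enabled)
    {
        g->queued = fq_enabled;   /* decide once, up front */
    }

    static void iotlb_sync(struct gather *g)
    {
        if (g->queued)
            return;               /* the flush queue owns the invalidation */
        /* ... flush the hardware TLB for the gathered range here ... */
    }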
/linux-master/fs/fuse/
dev.c
528 bool queued = false; local
543 queued = true;
547 return queued;
/linux-master/include/linux/spi/
spi.h
437 * @queued: whether this controller is providing an internal message queue
697 bool queued; member in struct:spi_controller
951 * well as the data buffers) for as long as the message is queued.
1113 * @spi: SPI device to which the transaction is queued
1135 * queued, and might complete after transactions to other devices. Messages
/linux-master/fs/xfs/
xfs_mru_cache.c
102 unsigned int queued; /* work has been queued */ member in struct:xfs_mru_cache
204 if (!mru->queued) {
205 mru->queued = 1;
280 mru->queued = next;
281 if ((mru->queued > 0)) {
389 if (mru->queued) {
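
The xfs_mru_cache hits show the classic "queue work only if not already queued" idiom: under the cache lock, reaper work is scheduled only when mru->queued is clear, so at most one delayed work item is outstanding, and the worker re-arms it only if elements remain. A minimal sketch with illustrative names:

    #include <stdbool.h>

    struct mru { bool queued; };

    static void mru_insert(struct mru *mru)
    {
        /* called with the cache lock held */
        if (!mru->queued) {
            mru->queued = true;
            /* ... schedule the delayed reaper work here ... */
        }
    }

    static void mru_reap(struct mru *mru, bool more_elements)
    {
        mru->queued = more_elements;  /* re-arm only if work remains */
        /* ... if (more_elements) schedule the work again ... */
    }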

Completed in 472 milliseconds
