/linux-master/drivers/dma/qcom/
hidma.h
      30: bool queued;   /* flag whether this is pending */   (member in struct hidma_tre)
     102: struct list_head queued;   (member in struct hidma_chan)

hidma_dbg.c
      31: seq_printf(s, "queued = 0x%x\n", tre->queued);

hidma.c
     172: bool queued = false;   (local)
     178: queued = true;
     188: if (queued) {
     213: INIT_LIST_HEAD(&mchan->queued);
     237: list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
     330: /* Move descriptor to queued */
     331: list_move_tail(&mdesc->node, &mchan->queued);
     487: list_splice_init(&mchan->queued, &list);

hidma_ll.c
     160: tre->queued = 0;
     174: * Multiple TREs may be queued and waiting in the pending queue.
     223: tre->queued = 0;
     276: * get queued to the SW for processing.
     539: tre->queued = 1;

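Taken together, the hidma hits show two levels of bookkeeping: each TRE carries a queued flag (hidma_ll.c sets it when the TRE enters the hardware's pending queue), while the channel keeps a queued list of descriptors between submission and execution (hidma.c). A minimal kernel-style sketch of that shape; all demo_* names are hypothetical stand-ins:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-ins for hidma_tre / hidma_chan. */
struct demo_desc {
	struct list_head node;
	bool queued;			/* pending in the hardware queue */
};

struct demo_chan {
	spinlock_t lock;
	struct list_head prepared;	/* built, not yet submitted */
	struct list_head queued;	/* submitted, awaiting execution */
};

/* Move one descriptor from prepared to queued, as hidma.c:331 does. */
static void demo_submit(struct demo_chan *c, struct demo_desc *d)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	d->queued = true;		/* cf. hidma_ll.c:539 */
	list_move_tail(&d->node, &c->queued);
	spin_unlock_irqrestore(&c->lock, flags);
}
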
/linux-master/net/ax25/
ax25_ds_in.c
     147: int queued = 0;   (local)
     240: queued = ax25_rx_iframe(ax25, skb);
     273: return queued;
     281: int queued = 0, frametype, ns, nr, pf;   (local)
     287: queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);
     290: queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);
     293: queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
     297: return queued;

ax25_std_in.c
     143: int queued = 0;   (local)
     225: queued = ax25_rx_iframe(ax25, skb);
     258: return queued;
     268: int queued = 0;   (local)
     380: queued = ax25_rx_iframe(ax25, skb);
     413: return queued;
     421: int queued = 0, frametype, ns, nr, pf;   (local)
     427: queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);
     430: queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);
     433: queued
    [all...]

/linux-master/net/lapb/ |
lapb_in.c
     249: int queued = 0;   (local)
     403: queued = 1;
     464: if (!queued)

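The AX.25 and LAPB hits above share one convention: each per-state handler returns a queued flag saying whether the skb was passed on (for example by ax25_rx_iframe()) rather than merely inspected, and the caller frees the buffer only when nothing queued it. A hedged sketch of that dispatch, with hypothetical demo_* states and handlers (the stubs stand in for real state machines):

#include <linux/skbuff.h>

enum { DEMO_STATE_1 = 1, DEMO_STATE_3 = 3 };

struct demo_cb {
	unsigned int state;
};

/* Stub: a real handler may queue the skb toward a socket and return 1. */
static int demo_state1_machine(struct demo_cb *cb, struct sk_buff *skb,
			       int frametype, int pf, int type)
{
	return 0;
}

static int demo_state3_machine(struct demo_cb *cb, struct sk_buff *skb,
			       int frametype, int ns, int nr, int pf, int type)
{
	return 0;
}

static int demo_process_rx_frame(struct demo_cb *cb, struct sk_buff *skb,
				 int frametype, int ns, int nr, int pf, int type)
{
	int queued = 0;

	switch (cb->state) {
	case DEMO_STATE_1:
		queued = demo_state1_machine(cb, skb, frametype, pf, type);
		break;
	case DEMO_STATE_3:
		queued = demo_state3_machine(cb, skb, frametype, ns, nr, pf, type);
		break;
	}

	return queued;	/* caller kfree_skb()s the frame iff !queued */
}
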
/linux-master/arch/powerpc/platforms/powernv/ |
opal.c
     310: * If the registration succeeded, replay any queued messages that came
     334: bool queued = false;   (local)
     343: queued = true;
     347: if (queued)

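The opal.c hit documents a record-then-replay idiom: messages that arrive before any notifier is registered are saved, and once registration succeeds the saved entries are replayed, with a local queued flag noting whether anything matched so follow-up work runs exactly once. A compact sketch under those assumptions; the demo_* types and list are hypothetical:

#include <linux/list.h>

struct demo_msg {
	struct list_head node;
	unsigned int type;
};

struct demo_state {
	struct list_head saved_msgs;	/* messages seen before registration */
};

/* Sketch: after a notifier registers, replay matching saved messages. */
static bool demo_replay_saved(struct demo_state *st, unsigned int type)
{
	struct demo_msg *m;
	bool queued = false;

	list_for_each_entry(m, &st->saved_msgs, node) {
		if (m->type != type)
			continue;
		/* ...deliver m to the newly registered notifier... */
		queued = true;		/* cf. opal.c:343 */
	}

	return queued;	/* caller acts once iff anything was replayed */
}
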
/linux-master/block/ |
bfq-cgroup.c
     175: if (blkg_rwstat_total(&stats->queued))
     217: blkg_rwstat_total(&stats->queued));
     225: blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
     233: blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
     358: /* queued stats shouldn't be cleared */
     379: /* queued stats shouldn't be cleared */
     444: blkg_rwstat_exit(&stats->queued);
     465: blkg_rwstat_init(&stats->queued, gfp) ||
    1361: .private = offsetof(struct bfq_group, stats.queued),
    1404: .private = offsetof(struct bfq_group, stats.queued),
    [all...]

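bfq-cgroup.c pairs a +1 on insert with a -1 on dispatch, so stats.queued is a level (requests currently sitting in the scheduler for that cgroup), not a cumulative count; that is also why the reset paths above deliberately skip it. A minimal sketch of the pairing using the real blkg_rwstat helpers (the header lives in-tree under block/, and the demo_* wrappers are hypothetical):

#include "blk-cgroup-rwstat.h"	/* in-tree: block/blk-cgroup-rwstat.h */

/* Sketch: queued tracks current depth, so add and subtract must pair up. */
static void demo_stat_queued(struct blkg_rwstat *queued, blk_opf_t opf)
{
	blkg_rwstat_add(queued, opf, 1);	/* request entered, cf. :225 */
}

static void demo_stat_dequeued(struct blkg_rwstat *queued, blk_opf_t opf)
{
	blkg_rwstat_add(queued, opf, -1);	/* request left, cf. :233 */
}
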
bfq-iosched.c
     478: if (bfqd->queued != 0) {
     686: * queue / cgroup already has many requests allocated and queued, this does not
    1866: * processes. So let also stably-merged queued enjoy weight
    2214: bfqq->queued[rq_is_sync(rq)]++;
    2216: * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
    2219: WRITE_ONCE(bfqd->queued, bfqd->queued + 1);
    2394: bfqq->queued[sync]--;
    2396: * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
    2399: WRITE_ONCE(bfqd->queued, bfq
    [all...]

bfq-iosched.h
     280: /* number of sync and async requests queued */
     281: int queued[2];   (member in struct bfq_queue)
     539: * queues with at least one request queued. This
     589: /* number of queued requests */
     590: int queued;   (member in struct bfq_data)
     833: * If the number of I/O requests queued in the device for a
     931: /* number of IOs queued up */
     932: struct blkg_rwstat queued;   (member in struct bfqg_stats)
     935: /* sum of number of ios queued across all samples */
     945: /* total time with empty current active q with other requests queued */
    [all...]

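The comments at bfq-iosched.c:2216 and :2396 explain the accessor choice: bfqd->queued is only written under bfqd->lock, but other paths read it without taking the lock, so writes are published with WRITE_ONCE() and lockless reads should pair with READ_ONCE(). A minimal sketch of the pattern:

#include <linux/compiler.h>
#include <linux/spinlock.h>

struct demo_data {
	spinlock_t lock;
	int queued;		/* total queued requests */
};

/* Writer side: serialized by d->lock, published with WRITE_ONCE(). */
static void demo_inc_queued(struct demo_data *d)
{
	lockdep_assert_held(&d->lock);
	WRITE_ONCE(d->queued, d->queued + 1);	/* cf. bfq-iosched.c:2219 */
}

/* Reader side: no lock taken, so sample with READ_ONCE(). */
static bool demo_has_queued(struct demo_data *d)
{
	return READ_ONCE(d->queued) != 0;
}
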
blk-mq.c
    2002: static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,   (argument)
    2005: if (hctx->queue->mq_ops->commit_rqs && queued) {
    2006: trace_block_unplug(hctx->queue, queued, !from_schedule);
    2020: int queued;   (local)
    2031: queued = 0;
    2048: * once the request is queued to lld, no need to cover the
    2056: queued++;
    2085: blk_mq_commit_rqs(hctx, queued, false);
    2170: * BLK_MQ_CPU_WORK_BATCH queued items.
    2687: int queued   (local)
    2824: int queued = 0;   (local)
    [all...]

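The blk-mq.c hits between lines 2002 and 2085 show a batch-doorbell contract: the dispatch loop counts how many requests the driver actually accepted and, at the end of the batch, invokes the optional ->commit_rqs() hook exactly once, and only if something was queued. A sketch of that shape with hypothetical demo_* types (the real hook lives in struct blk_mq_ops):

#include <linux/list.h>

struct demo_hctx;

struct demo_rq {
	struct list_head node;
};

struct demo_ops {
	int (*queue_rq)(struct demo_hctx *hctx, struct demo_rq *rq);
	void (*commit_rqs)(struct demo_hctx *hctx);	/* optional doorbell */
};

struct demo_hctx {
	const struct demo_ops *ops;
};

/* Ring the doorbell once per batch, and only if something was queued. */
static void demo_commit_rqs(struct demo_hctx *hctx, int queued)
{
	if (hctx->ops->commit_rqs && queued)	/* cf. blk-mq.c:2005 */
		hctx->ops->commit_rqs(hctx);
}

static void demo_dispatch(struct demo_hctx *hctx, struct list_head *list)
{
	struct demo_rq *rq, *next;
	int queued = 0;

	list_for_each_entry_safe(rq, next, list, node) {
		if (hctx->ops->queue_rq(hctx, rq) != 0)
			break;			/* driver busy: stop early */
		list_del_init(&rq->node);
		queued++;			/* cf. blk-mq.c:2056 */
	}

	demo_commit_rqs(hctx, queued);
}
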
blk-throttle.c
      66: /* Total Number of queued bios on READ and WRITE lists */
     255: * @queued: the service_queue->queued[] list @qn belongs to
     257: * Add @bio to @qn and put @qn on @queued if it's not already on.
     261: throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, struct list_head *queued)   (argument)
     262: struct list_head *queued)
     266: list_add_tail(&qn->node, queued);
     273: * @queued: the qnode list to peek
     275: static struct bio *throtl_peek_queued(struct list_head *queued)   (argument)
     280: if (list_empty(queued))
     283: qn = list_first_entry(queued, struc
     303: throtl_pop_queued(struct list_head *queued, struct throtl_grp **tg_to_put)   (argument)
    [all...]

blk-throttle.h
      14: * To avoid such starvation, dispatched bios are queued separately
      19: * throtl_qnode is used to keep the queued bios separated by their sources.
      20: * Bios are queued to throtl_qnode which in turn is queued to
      24: * belongs to a throtl_grp and gets queued on itself or the parent, so
      26: * queued and decrementing when dequeued is enough to keep the whole blkg
      30: struct list_head node;   /* service_queue->queued[] */
      31: struct bio_list bios;    /* queued bios */
      39: * Bios queued directly to this service_queue or dispatched from
      42: struct list_head queued[   (member in struct throtl_service_queue)
    [all...]

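blk-throttle.h's comments describe a two-level queue: bios land on a per-source throtl_qnode, and the qnode itself sits on the service queue's queued[READ/WRITE] list, keeping bios from different sources separated for fairness. A sketch of the add/peek halves shown above, with demo_* stand-ins for the throtl types (it assumes qn->node is re-initialized whenever the qnode is taken off the list, as the real pop path maintains):

#include <linux/bio.h>
#include <linux/list.h>

struct demo_qnode {
	struct list_head node;		/* on service_queue->queued[] */
	struct bio_list bios;		/* this source's queued bios */
};

/* Add a bio to its source's qnode; enqueue the qnode if newly non-empty. */
static void demo_qnode_add_bio(struct bio *bio, struct demo_qnode *qn,
			       struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node))	/* cf. blk-throttle.c:266 */
		list_add_tail(&qn->node, queued);
}

/* Peek the bio at the head of the round-robin list, if any. */
static struct bio *demo_peek_queued(struct list_head *queued)
{
	struct demo_qnode *qn;

	if (list_empty(queued))		/* cf. blk-throttle.c:280 */
		return NULL;
	qn = list_first_entry(queued, struct demo_qnode, node);
	return bio_list_peek(&qn->bios);
}
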
mq-deadline.c
     289: /* Number of requests queued for a given priority level. */
     529: * For a zoned block device, if we only have writes queued and none of
     670: uint32_t queued;   (local)
     676: queued = dd_queued(dd, prio);
     679: WARN_ONCE(queued != 0,
     912: * dispatching requests if all the queued requests are write requests directed
    1237: {"queued", 0400, dd_queued_show},

/linux-master/drivers/acpi/apei/ |
ghes.c
     468: * Ensure any queued work has been done before we return to the context that
     533: bool queued = false;   (local)
     557: queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
     571: return queued;
     684: bool queued = false;   (local)
     703: queued = ghes_handle_memory_failure(gdata, sev, sync);
     709: queued = ghes_handle_arm_hw_error(gdata, sev, sync);
     720: return queued;

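In ghes.c both handlers accumulate a local queued flag so the caller learns whether any deferred recovery work was actually scheduled; for synchronous errors that answer decides how the fault is resolved. A hedged sketch of the accumulation loop with hypothetical section types and stub handlers (the real code walks CPER error sections):

#include <linux/types.h>

enum demo_section_type { DEMO_SEC_MEM, DEMO_SEC_ARM, DEMO_SEC_OTHER };

struct demo_section {
	enum demo_section_type type;
};

/* Stubs: real handlers queue recovery work and report whether they did. */
static bool demo_handle_mem(struct demo_section *s) { return true; }
static bool demo_handle_arm(struct demo_section *s) { return false; }

/* Sketch: walk the error sections, remember if recovery was queued. */
static bool demo_handle_sections(struct demo_section *secs, int n)
{
	bool queued = false;
	int i;

	for (i = 0; i < n; i++) {
		if (secs[i].type == DEMO_SEC_MEM)
			queued = demo_handle_mem(&secs[i]);
		else if (secs[i].type == DEMO_SEC_ARM)
			queued = demo_handle_arm(&secs[i]);
	}

	return queued;	/* cf. ghes.c:720 */
}
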
/linux-master/drivers/atm/ |
eni.c
     147: static int tx_complete = 0,dma_complete = 0,queued = 0,requeued = 0,   (variable)
    1177: queued++;
    1462: "tx_complete=%d,dma_complete=%d,queued=%d,requeued=%d,sub=%d,\n"
    1464: tx_complete,dma_complete,queued,requeued,submitted,backlogged,

/linux-master/drivers/block/drbd/ |
drbd_worker.c
     633: int queued = sk->sk_wmem_queued;   (local)
     635: if (queued > sndbuf / 2) {
    2097: * even if no new requests are queued yet.

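drbd_worker.c:633 samples the socket's write-memory counter and treats a send buffer that is more than half full as congestion, a plain high-watermark test. A sketch of the check; sk_wmem_queued and sk_sndbuf are real struct sock members, the demo_* wrapper is hypothetical:

#include <net/sock.h>

/* Sketch: back off when more than half the send buffer is in flight. */
static bool demo_tx_congested(struct sock *sk)
{
	int queued = sk->sk_wmem_queued;	/* bytes queued, not yet sent */
	int sndbuf = sk->sk_sndbuf;

	return queued > sndbuf / 2;		/* cf. drbd_worker.c:635 */
}
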
/linux-master/drivers/crypto/inside-secure/ |
safexcel_cipher.c
     693: int queued, i, ret = 0;   (local)
     734: queued = totlen_src;
     799: if (queued < len)
     800: len = queued;
     803: !(queued - len),
     816: queued -= len;
     817: if (!queued)

safexcel_hash.c
     323: u64 queued, len;   (local)
     325: queued = safexcel_queued_len(req);
     326: if (queued <= HASH_CACHE_SIZE)
     327: cache_len = queued;
     329: cache_len = queued - areq->nbytes;
     332: /* If this is not the last request and the queued data does not
     335: extra = queued & (HASH_CACHE_SIZE - 1);
     337: /* If this is not the last request and the queued data
     347: queued -= extra;
     349: if (!queued) {
    [all...]

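In both safexcel files, queued is a running byte count. The cipher loop subtracts each scatterlist segment from it until nothing is left; the hash path splits queued into a block-aligned part to process now and a remainder to keep cached, using a power-of-two mask as at line 335. A sketch of that split, with DEMO_BLOCK_SIZE standing in for HASH_CACHE_SIZE:

#include <linux/types.h>

#define DEMO_BLOCK_SIZE 64	/* stand-in for HASH_CACHE_SIZE */

/*
 * Sketch: split 'queued' bytes into a block-aligned part to process now
 * and a remainder to keep cached until the next (or final) request.
 */
static u64 demo_split_queued(u64 queued, u64 *extra)
{
	*extra = queued & (DEMO_BLOCK_SIZE - 1);	/* cf. :335 */
	return queued - *extra;				/* cf. :347 */
}

Judging from the comment at line 337, the driver also holds back one whole block when the remainder is zero on a non-final request, so there is always cached data left to flush; the sketch omits that refinement.
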
/linux-master/drivers/dma/ |
mpc512x_dma.c
     197: struct list_head queued;   (member in struct mpc_dma_chan)
     247: * Execute all queued DMA descriptors.
     252: * c) mchan->queued list contains at least one entry.
     262: while (!list_empty(&mchan->queued)) {
     263: mdesc = list_first_entry(&mchan->queued,
     339: /* Execute queued descriptors */
     341: if (!list_empty(&mchan->queued))
     472: list_move_tail(&mdesc->node, &mchan->queued);
     474: /* If channel is idle, execute all queued descriptors */
     558: BUG_ON(!list_empty(&mchan->queued));
    [all...]

nbpfaxi.c
     194: * @queued: list of queued descriptors
     220: struct list_head queued;   (member in struct nbpf_channel)
     609: if (list_empty(&chan->queued))
     612: list_splice_tail_init(&chan->queued, &chan->active);
     653: list_for_each_entry(desc, &chan->queued, node)
     681: list_add_tail(&desc->node, &chan->queued);
     849: list_splice_init(&chan->queued, &head);
    1046: INIT_LIST_HEAD(&chan->queued);
    1235: /* On error: abort all queued transfer
    [all...]

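Both DMA drivers here keep a queued list between submit and execution: mpc512x_dma.c moves one descriptor at a time onto it and drains it when the channel goes idle, while nbpfaxi.c splices the whole list onto the active list in one operation. A sketch of the splice-on-idle half, with hypothetical demo_* names:

#include <linux/list.h>

struct demo_dma_chan {
	struct list_head queued;	/* submitted descriptors */
	struct list_head active;	/* descriptors on the hardware */
};

/* Sketch: when the engine idles, promote everything queued at once. */
static void demo_start_transfer(struct demo_dma_chan *chan)
{
	if (list_empty(&chan->queued))
		return;				/* cf. nbpfaxi.c:609 */

	list_splice_tail_init(&chan->queued, &chan->active);
	/* ...program the hardware from chan->active here... */
}
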
/linux-master/drivers/gpio/ |
gpio-virtio.c
      36: bool queued;   (member in struct vgpio_irq_line)
     213: if (WARN_ON(irq_line->queued || irq_line->masked || irq_line->disabled))
     228: irq_line->queued = true;
     372: irq_line->queued = false;

/linux-master/drivers/gpu/drm/ |
drm_flip_work.c
      51: list_add_tail(&task->node, &work->queued);
      79: * drm_flip_work_commit - commit queued work
      81: * @wq: the work-queue to run the queued work on
      83: * Trigger work previously queued by drm_flip_work_queue() to run
      86: * prior), and then from vblank irq commit the queued work.
      94: list_splice_tail(&work->queued, &work->commited);
      95: INIT_LIST_HEAD(&work->queued);
     138: INIT_LIST_HEAD(&work->queued);
     155: WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));

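drm_flip_work splits work across two lists: drm_flip_work_queue() appends tasks to queued, and drm_flip_work_commit(), typically called from the vblank interrupt, splices them onto commited (the source's own spelling) for a worker to run later. A sketch of the two-phase handoff, locking elided and demo_* names hypothetical:

#include <linux/list.h>

struct demo_flip_work {
	struct list_head queued;	/* staged tasks */
	struct list_head commited;	/* tasks handed to the worker */
};

/* Phase 1: stage a task while building a frame. */
static void demo_flip_queue(struct demo_flip_work *w, struct list_head *task)
{
	list_add_tail(task, &w->queued);	/* cf. drm_flip_work.c:51 */
}

/* Phase 2: from vblank, promote all staged tasks in one step. */
static void demo_flip_commit(struct demo_flip_work *w)
{
	list_splice_tail(&w->queued, &w->commited);	/* cf. :94 */
	INIT_LIST_HEAD(&w->queued);			/* cf. :95 */
}
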
/linux-master/drivers/iommu/ |
dma-iommu.c
     265: /* The IOVAs will be torn down separately, so just free our queued pages */
     817: else if (gather && gather->queued)
     839: iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
     844: if (!iotlb_gather.queued)
