/linux-master/drivers/net/wireless/mediatek/mt76/

dma.c
    279: q->queued++;
    343: q->queued++;
    396: while (q->queued > 0 && q->tail != last) {
    417: if (!q->queued)
    482: if (!q->queued)
    496: q->queued--;
    511: if (q->queued + 1 >= q->ndesc - 1)
    589: if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
    644: while (q->queued < q->ndesc - 1) {
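
The dma.c hits above all maintain one invariant: q->queued counts the
descriptors currently in flight on a ring of q->ndesc slots, so enqueue
paths guard against fullness (lines 511 and 589) and dequeue paths
against emptiness (lines 417 and 482). Below is a minimal userspace
sketch of that occupancy-counter pattern; the names (struct ring,
ring_push, ring_pop, NDESC) are invented for illustration and are not
the mt76 API.

    /* Occupancy-counter ring sketch: "queued" tracks in-flight slots
     * and is checked against the ring size before every push/pop. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NDESC 8

    struct ring {
            int head, tail;   /* producer / consumer indices */
            int queued;       /* number of occupied slots */
            int desc[NDESC];
    };

    static bool ring_push(struct ring *q, int val)
    {
            /* mirrors "if (q->queued + 1 >= q->ndesc - 1)": headroom
             * is kept so the ring never fills completely */
            if (q->queued + 1 >= NDESC - 1)
                    return false;
            q->desc[q->head] = val;
            q->head = (q->head + 1) % NDESC;
            q->queued++;
            return true;
    }

    static bool ring_pop(struct ring *q, int *val)
    {
            if (!q->queued)           /* mirrors "if (!q->queued)" */
                    return false;
            *val = q->desc[q->tail];
            q->tail = (q->tail + 1) % NDESC;
            q->queued--;
            return true;
    }

    int main(void)
    {
            struct ring q = { 0 };
            int v;

            while (ring_push(&q, q.queued))  /* fill until guard trips */
                    ;
            printf("queued after fill: %d\n", q.queued);
            while (ring_pop(&q, &v))
                    ;
            printf("queued after drain: %d\n", q.queued);
            return 0;
    }

The same counter discipline appears in usb.c and sdio_txrx.c below and
in mtk_wed_wo.c further down; only the fullness bound differs
(q->ndesc, q->ndesc - 1, or q->n_desc).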

wed.c
    143: q->queued = q->head;
    150: q->queued = q->head;

usb.c
    444: if (q->queued > 0) {
    447: q->queued--;
    582: q->queued++;
    654: q->queued = 0;
    775: while (q->queued > 0) {
    785: if (!q->queued)
    863: if (q->queued == q->ndesc)
    881: q->queued++;
    1027: while (q->queued > 0) {

mt792x_debugfs.c
    89: seq_printf(s, "AC%d: queued=%d\n", i, qlen);
    118: "%s: queued=%d head=%d tail=%d\n",
    119: queue_map[i].queue, q->queued, q->head,

sdio_txrx.c
    133: if (q->queued + i + 1 == q->ndesc)
    143: q->queued += i;

/linux-master/drivers/dma/

mpc512x_dma.c
    197: struct list_head queued;  (member in struct:mpc_dma_chan)
    247: * Execute all queued DMA descriptors.
    252: * c) mchan->queued list contains at least one entry.
    262: while (!list_empty(&mchan->queued)) {
    263: mdesc = list_first_entry(&mchan->queued,
    339: /* Execute queued descriptors */
    341: if (!list_empty(&mchan->queued))
    472: list_move_tail(&mdesc->node, &mchan->queued);
    474: /* If channel is idle, execute all queued descriptors */
    558: BUG_ON(!list_empty(&mchan->queued));
    [all...]
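
In mpc512x_dma.c (and likewise in nbpfaxi.c below and hidma.c further
down) "queued" is an intrusive list head on the channel: submitted
descriptors are parked on mchan->queued (line 472) and drained onto an
active list when the hardware is kicked (lines 262-263). The sketch
below re-implements just enough of the kernel's <linux/list.h> to run
in userspace and show that queued-to-active flow; the drivers use the
real list helpers, and struct desc here is invented.

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *prev, *next; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
            n->prev = h->prev; n->next = h;
            h->prev->next = n; h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
            n->prev->next = n->next; n->next->prev = n->prev;
    }

    static void list_move_tail(struct list_head *n, struct list_head *h)
    {
            list_del(n);               /* cf. line 472 above */
            list_add_tail(n, h);
    }

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct desc { int cookie; struct list_head node; };

    int main(void)
    {
            struct list_head queued, active;
            struct desc d1 = { 1 }, d2 = { 2 };

            INIT_LIST_HEAD(&queued);
            INIT_LIST_HEAD(&active);
            list_add_tail(&d1.node, &queued);   /* tx_submit side */
            list_add_tail(&d2.node, &queued);

            /* "execute all queued descriptors", cf. lines 262-263 */
            while (!list_empty(&queued)) {
                    struct desc *d = container_of(queued.next,
                                                  struct desc, node);
                    list_move_tail(&d->node, &active);
                    printf("started descriptor %d\n", d->cookie);
            }
            return 0;
    }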

nbpfaxi.c
    194: * @queued: list of queued descriptors
    220: struct list_head queued;  (member in struct:nbpf_channel)
    609: if (list_empty(&chan->queued))
    612: list_splice_tail_init(&chan->queued, &chan->active);
    653: list_for_each_entry(desc, &chan->queued, node)
    681: list_add_tail(&desc->node, &chan->queued);
    849: list_splice_init(&chan->queued, &head);
    1046: INIT_LIST_HEAD(&chan->queued);
    1235: /* On error: abort all queued transfer
    [all...]

/linux-master/block/

blk-throttle.c
    66: /* Total Number of queued bios on READ and WRITE lists */
    255: * @queued: the service_queue->queued[] list @qn belongs to
    257: * Add @bio to @qn and put @qn on @queued if it's not already on.
    262: struct list_head *queued)
    266: list_add_tail(&qn->node, queued);
    273: * @queued: the qnode list to peek
    275: static struct bio *throtl_peek_queued(struct list_head *queued)
    280: if (list_empty(queued))
    283: qn = list_first_entry(queued, struc
    261: throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, struct list_head *queued)  (argument)
    303: throtl_pop_queued(struct list_head *queued, struct throtl_grp **tg_to_put)  (argument)
    [all...]
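
blk-throttle's service_queue->queued[] is a queue of qnodes rather than
of bios: each source contributes one qnode holding its bios, the qnode
goes on the list only once (line 266), and popping a bio rotates its
qnode to the tail so no single source monopolizes dispatch. A compact
illustrative model, with the kernel lists replaced by small arrays and
all sizes picked arbitrarily:

    #include <stdio.h>

    #define MAX_BIOS   8
    #define MAX_QNODES 4

    struct qnode {
            int bios[MAX_BIOS];   /* stand-in for the bio list */
            int nr_bios;
            int on_list;
    };

    struct service_queue {
            struct qnode *queued[MAX_QNODES];   /* FIFO of qnodes */
            int nr_queued;
    };

    static void qnode_add_bio(struct service_queue *sq,
                              struct qnode *qn, int bio)
    {
            qn->bios[qn->nr_bios++] = bio;
            if (!qn->on_list) {     /* cf. line 266: qnode added once */
                    sq->queued[sq->nr_queued++] = qn;
                    qn->on_list = 1;
            }
    }

    static int pop_queued(struct service_queue *sq)
    {
            struct qnode *qn = sq->queued[0];
            int bio = qn->bios[0], i;

            for (i = 1; i < qn->nr_bios; i++)
                    qn->bios[i - 1] = qn->bios[i];
            qn->nr_bios--;

            /* rotate: remove the qnode, requeue it at the tail if it
             * still holds bios, so sources take turns */
            for (i = 1; i < sq->nr_queued; i++)
                    sq->queued[i - 1] = sq->queued[i];
            sq->nr_queued--;
            if (qn->nr_bios)
                    sq->queued[sq->nr_queued++] = qn;
            else
                    qn->on_list = 0;
            return bio;
    }

    int main(void)
    {
            static struct service_queue sq;
            static struct qnode a, b;

            qnode_add_bio(&sq, &a, 10);
            qnode_add_bio(&sq, &a, 11);
            qnode_add_bio(&sq, &b, 20);
            while (sq.nr_queued)
                    printf("dispatch bio %d\n", pop_queued(&sq));
            return 0;
    }

Dispatch order is 10, 20, 11: source b gets a turn even though source a
queued first, which is the fairness the qnode indirection buys.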

bfq-iosched.h
    280: /* number of sync and async requests queued */
    281: int queued[2];  (member in struct:bfq_queue)
    539: * queues with at least one request queued. This
    589: /* number of queued requests */
    590: int queued;  (member in struct:bfq_data)
    833: * If the number of I/O requests queued in the device for a
    931: /* number of IOs queued up */
    932: struct blkg_rwstat queued;  (member in struct:bfqg_stats)
    935: /* sum of number of ios queued across all samples */
    945: /* total time with empty current active q with other requests queued */
    [all...]

bfq-cgroup.c
    175: if (blkg_rwstat_total(&stats->queued))
    217: blkg_rwstat_total(&stats->queued));
    225: blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
    233: blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
    358: /* queued stats shouldn't be cleared */
    379: /* queued stats shouldn't be cleared */
    444: blkg_rwstat_exit(&stats->queued);
    465: blkg_rwstat_init(&stats->queued, gfp) ||
    1361: .private = offsetof(struct bfq_group, stats.queued),
    1404: .private = offsetof(struct bfq_group, stats.queued),
    [all...]
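
Unlike the ring counters above, bfqg_stats.queued is a gauge kept per
operation direction: +1 when a request enters the group (line 225), -1
when it leaves (line 233), so the total is the number of requests
currently resident. A minimal model with blkg_rwstat reduced to two
plain counters (the real type is per-CPU and atomic):

    #include <stdio.h>

    enum { R, W };

    struct rwstat { long cnt[2]; };

    static void rwstat_add(struct rwstat *s, int dir, long val)
    {
            s->cnt[dir] += val;      /* cf. lines 225 and 233 */
    }

    static long rwstat_total(const struct rwstat *s)
    {
            return s->cnt[R] + s->cnt[W];   /* cf. line 175 */
    }

    int main(void)
    {
            struct rwstat queued = { { 0, 0 } };

            rwstat_add(&queued, R, 1);    /* request queued */
            rwstat_add(&queued, W, 1);
            rwstat_add(&queued, R, -1);   /* request dispatched */
            printf("resident requests: %ld\n", rwstat_total(&queued));
            return 0;
    }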

/linux-master/drivers/dma/qcom/

hidma.c
    172: bool queued = false;  (local)
    178: queued = true;
    188: if (queued) {
    213: INIT_LIST_HEAD(&mchan->queued);
    237: list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
    330: /* Move descriptor to queued */
    331: list_move_tail(&mdesc->node, &mchan->queued);
    487: list_splice_init(&mchan->queued, &list);

hidma_dbg.c
    31: seq_printf(s, "queued = 0x%x\n", tre->queued);

/linux-master/kernel/

stop_machine.c
    288: * queued up in reverse order leading to system deadlock.
    291: * queued a work on cpu1 but not on cpu2, we hold both locks.
    381: * true if cpu_stop_work was queued successfully and @fn will be called,
    397: bool queued = false;  (local)
    414: queued = true;
    420: return queued;

/linux-master/sound/usb/

pcm.c
    38: int queued;  (local)
    41: queued = bytes_to_frames(runtime, subs->inflight_bytes);
    42: if (!queued)
    61: est_delay = queued - est_delay;
    1342: ctx->queued += bytes;
    1359: unsigned int queued = 0;  (local)
    1398: queued++;
    1402: urb_ctx_queue_advance(subs, urb, queued);
    1487: ctx->queued = 0;
    1612: if (ctx->queued) {
    [all...]

/linux-master/drivers/gpio/

gpio-virtio.c
    36: bool queued;  (member in struct:vgpio_irq_line)
    213: if (WARN_ON(irq_line->queued || irq_line->masked || irq_line->disabled))
    228: irq_line->queued = true;
    372: irq_line->queued = false;

/linux-master/drivers/net/ethernet/mediatek/

mtk_wed_wo.c
    108: if (!q->queued)
    117: q->queued--;
    140: while (q->queued < q->n_desc) {
    171: q->queued++;

/linux-master/net/rds/

send.c
    131: * - queued acks can be delayed behind large messages
    133: * - small message latency is higher behind queued large messages
    153: * sendmsg calls here after having queued its message on the send
    435: * not try and send their newly queued message. We need to check the
    678: * Transports call here when they've determined that the receiver queued
    813: * we only want this to fire once so we use the callers 'queued'. It's
    820: __be16 dport, int *queued)
    825: if (*queued)
    872: rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
    876: *queued
    817: rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn, struct rds_conn_path *cp, struct rds_message *rm, __be16 sport, __be16 dport, int *queued)  (argument)
    1110: int queued = 0, allocated_mr = 0;  (local)
    [all...]

/linux-master/net/mac80211/

offchannel.c
    98: * Stop queues and transmit all frames queued by the driver
    536: bool queued = false, combine_started = true;  (local)
    619: queued = true;
    647: queued = true;
    654: queued = ieee80211_coalesce_hw_started_roc(local, roc, tmp);
    655: if (queued)
    657: /* if it wasn't queued, perhaps it can be combined with
    665: if (!queued)

/linux-master/drivers/tty/serial/

amba-pl011.c
    248: bool queued;  (member in struct:pl011_dmatx_data)
    543: if (uap->dmatx.queued)
    557: * a TX buffer completing, we must update the tx queued status to
    562: uap->dmatx.queued = false;
    581: * 1 if we queued up a TX DMA buffer.
    602: uap->dmatx.queued = false;
    635: uap->dmatx.queued = false;
    644: uap->dmatx.queued = false;
    665: uap->dmatx.queued = true;
    685: * true if we queued
    [all...]
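
Here "queued" is not a count but a single-in-flight flag: pl011 keeps
at most one TX DMA buffer submitted at a time, setting dmatx.queued on
submission (line 665), clearing it in the completion and error paths
(lines 562, 602, 635, 644), and testing it before starting another
transfer (line 543). A tiny sketch of that discipline, with the DMA
engine faked by a counter:

    #include <stdbool.h>
    #include <stdio.h>

    struct dmatx {
            bool queued;    /* one buffer may be in flight at a time */
            int submitted;
    };

    static bool dma_tx_start(struct dmatx *tx)
    {
            if (tx->queued)         /* cf. line 543 */
                    return false;   /* previous buffer still pending */
            tx->submitted++;
            tx->queued = true;      /* cf. line 665 */
            return true;
    }

    static void dma_tx_complete(struct dmatx *tx)
    {
            tx->queued = false;     /* cf. lines 562/602/635/644 */
    }

    int main(void)
    {
            struct dmatx tx = { false, 0 };

            dma_tx_start(&tx);
            printf("second start: %s\n",
                   dma_tx_start(&tx) ? "ok" : "refused");
            dma_tx_complete(&tx);
            printf("after completion: %s\n",
                   dma_tx_start(&tx) ? "ok" : "refused");
            return 0;
    }

gpio-virtio.c above and ghes.c below use the same boolean shape, just
for an IRQ request buffer and for "did we queue memory_failure work".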

/linux-master/drivers/acpi/apei/

ghes.c
    468: * Ensure any queued work has been done before we return to the context that
    533: bool queued = false;  (local)
    557: queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
    571: return queued;
    684: bool queued = false;  (local)
    703: queued = ghes_handle_memory_failure(gdata, sev, sync);
    709: queued = ghes_handle_arm_hw_error(gdata, sev, sync);
    720: return queued;

/linux-master/drivers/crypto/inside-secure/

safexcel_hash.c
    323: u64 queued, len;  (local)
    325: queued = safexcel_queued_len(req);
    326: if (queued <= HASH_CACHE_SIZE)
    327: cache_len = queued;
    329: cache_len = queued - areq->nbytes;
    332: /* If this is not the last request and the queued data does not
    335: extra = queued & (HASH_CACHE_SIZE - 1);
    337: /* If this is not the last request and the queued data
    347: queued -= extra;
    349: if (!queued) {
    [all...]
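
safexcel uses "queued" as a byte count: on a non-final update only
whole HASH_CACHE_SIZE blocks may be fed to the engine, so the remainder
"queued & (HASH_CACHE_SIZE - 1)" (line 335) is held back in the cache
and subtracted from queued (line 347). A worked sketch of that
arithmetic; the even-division case keeping back one full block is an
assumption from the truncated snippets, not something the listing
above proves:

    #include <stdio.h>

    #define HASH_CACHE_SIZE 64  /* block size; must be a power of two */

    static unsigned long split(unsigned long queued, int last_req)
    {
            unsigned long extra = 0;

            if (!last_req) {
                    extra = queued & (HASH_CACHE_SIZE - 1);
                    if (!extra)                  /* assumed behavior */
                            extra = HASH_CACHE_SIZE;
                    queued -= extra;             /* cf. line 347 */
            }
            printf("process %lu bytes, cache %lu bytes\n", queued, extra);
            return queued;
    }

    int main(void)
    {
            split(200, 0);   /* process 192, cache 8  */
            split(256, 0);   /* process 192, cache 64 */
            split(200, 1);   /* final: process all 200 */
            return 0;
    }

The power-of-two mask is why HASH_CACHE_SIZE - 1 works as a modulus:
200 & 63 == 200 % 64 == 8.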

/linux-master/net/lapb/

lapb_in.c
    249: int queued = 0;  (local)
    403: queued = 1;
    464: if (!queued)

/linux-master/drivers/md/dm-vdo/indexer/

volume.c
    54: * handles the queued reads, adding the page to the cache and updating any requests queued with it
    277: /* This assignment also clears the queued flag. */
    295: /* Clear the mapping and the queued flag for the new page. */
    340: /* It's already queued, so add this request to the existing entry. */
    358: /* Mark the page as queued, so that chapter invalidation knows to cancel a read. */
    377: bool queued;  (local)
    385: queued = (index_value & VOLUME_CACHE_QUEUED_FLAG) != 0;
    386: /* Check to see if it's still queued before resetting. */
    387: if (entry->invalid && queued)
    649: bool queued;  (local)
    [all...]
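
dm-vdo packs the queued state into the cache-index word itself: line
385 tests VOLUME_CACHE_QUEUED_FLAG to distinguish a page that is
already cached from one whose read is still pending, so chapter
invalidation can cancel the read. A sketch of that flag-in-a-word
pattern; the bit position and the mask name INDEX_MASK are invented
here, only VOLUME_CACHE_QUEUED_FLAG comes from the listing:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VOLUME_CACHE_QUEUED_FLAG (UINT32_C(1) << 31) /* assumed bit */
    #define INDEX_MASK               (~VOLUME_CACHE_QUEUED_FLAG)

    static bool is_queued(uint32_t index_value)
    {
            /* cf. line 385 above */
            return (index_value & VOLUME_CACHE_QUEUED_FLAG) != 0;
    }

    int main(void)
    {
            /* a read has been queued for cache slot 42 */
            uint32_t v = 42 | VOLUME_CACHE_QUEUED_FLAG;

            printf("queued=%d slot=%u\n", is_queued(v), v & INDEX_MASK);

            v &= INDEX_MASK;  /* read completed: clear the queued flag */
            printf("queued=%d slot=%u\n", is_queued(v), v & INDEX_MASK);
            return 0;
    }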

/linux-master/drivers/net/wireless/ath/ath9k/

debug.h
    147: * @queued: Total MPDUs (non-aggr) queued
    149: * @a_aggr: Total no. of aggregates queued
    150: * @a_queued_hw: Total AMPDUs queued to hardware
    172: u32 queued;  (member in struct:ath_tx_stats)

/linux-master/drivers/mtd/

ftl.c
    572: int queued, ret;  (local)
    579: queued = 0;
    590: queued = 1;
    610: if (queued) {