Searched refs:queue (Results 451 - 475 of 1381) sorted by relevance


/linux-master/drivers/nvme/host/
multipath.c
60 blk_mq_unfreeze_queue(h->disk->queue);
70 blk_mq_freeze_queue_wait(h->disk->queue);
80 blk_freeze_queue_start(h->disk->queue);
110 * The alternate request queue that we may end up submitting
130 if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
385 * different queue via blk_steal_bios(), so we need to use the bio_split
386 * pool from the original queue to allocate the bvecs from.
550 blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
551 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
552 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
[all...]
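
The multipath.c hits above show the blk-mq freeze sequence on the head disk's request queue: blk_freeze_queue_start() stops new submissions, blk_mq_freeze_queue_wait() waits for requests already in flight to drain, and blk_mq_unfreeze_queue() reopens the queue. Below is a minimal userspace sketch of that freeze/drain/unfreeze idea, using hypothetical names and plain C11 atomics; the kernel implementation is built on a per-CPU refcount and a wait queue rather than a spinning drain.

/* Freeze/drain/unfreeze pattern, illustrative sketch only
 * (hypothetical names; not the blk-mq implementation). */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

struct req_queue {
    atomic_bool frozen;     /* new submissions are refused while set */
    atomic_int  in_flight;  /* requests currently being processed */
};

static bool queue_enter(struct req_queue *q)
{
    if (atomic_load(&q->frozen))
        return false;                      /* caller must retry later */
    atomic_fetch_add(&q->in_flight, 1);
    if (atomic_load(&q->frozen)) {         /* lost a race with a freeze */
        atomic_fetch_sub(&q->in_flight, 1);
        return false;
    }
    return true;
}

static void queue_exit(struct req_queue *q)
{
    atomic_fetch_sub(&q->in_flight, 1);
}

static void queue_freeze_start(struct req_queue *q)
{
    atomic_store(&q->frozen, true);        /* stop new entries */
}

static void queue_freeze_wait(struct req_queue *q)
{
    while (atomic_load(&q->in_flight) > 0) /* drain what is in flight */
        sched_yield();
}

static void queue_unfreeze(struct req_queue *q)
{
    atomic_store(&q->frozen, false);
}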
/linux-master/kernel/locking/
qspinlock.c
33 * The basic principle of a queue-based spinlock can best be understood
34 * by studying a classic queue-based spinlock implementation called the
102 * Per-CPU queue node structures; we can never have more than 4 nested
168 * xchg_tail - Put in the new queue tail code word & retrieve previous one
170 * @tail : The new queue tail code word
171 * Return: The previous queue tail code word
212 * xchg_tail - Put in the new queue tail code word & retrieve previous one
214 * @tail : The new queue tail code word
215 * Return: The previous queue tail code word
297 * (queue tai
[all...]
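
The qspinlock.c comments above refer to a "classic queue-based spinlock"; the usual textbook example is the MCS lock, where each waiter spins on its own queue node instead of on the shared lock word. The following is a minimal userspace MCS sketch in C11 atomics. It is not the kernel's qspinlock, which additionally packs the tail into a 32-bit lock word and draws nodes from a small per-CPU array (the "4 nested" contexts mentioned in the line 102 snippet).

/* MCS-style queue spinlock, userspace sketch (C11 atomics). */
#include <stdatomic.h>
#include <stdbool.h>

struct mcs_node {
    _Atomic(struct mcs_node *) next;
    atomic_bool locked;                /* true while this waiter must spin */
};

struct mcs_lock {
    _Atomic(struct mcs_node *) tail;   /* last waiter in the queue, or NULL */
};

static void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *node)
{
    atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
    atomic_store_explicit(&node->locked, true, memory_order_relaxed);

    /* Publish ourselves as the new tail; the old tail is our predecessor. */
    struct mcs_node *prev = atomic_exchange_explicit(&lock->tail, node,
                                                     memory_order_acq_rel);
    if (prev) {
        atomic_store_explicit(&prev->next, node, memory_order_release);
        /* Spin on our own node, not on the shared lock word. */
        while (atomic_load_explicit(&node->locked, memory_order_acquire))
            ;
    }
}

static void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *node)
{
    struct mcs_node *next = atomic_load_explicit(&node->next,
                                                 memory_order_acquire);
    if (!next) {
        /* No visible successor: try to swing the tail back to NULL. */
        struct mcs_node *expected = node;
        if (atomic_compare_exchange_strong_explicit(&lock->tail, &expected,
                                                    NULL,
                                                    memory_order_acq_rel,
                                                    memory_order_acquire))
            return;
        /* A successor is linking in; wait until it becomes visible. */
        while (!(next = atomic_load_explicit(&node->next,
                                             memory_order_acquire)))
            ;
    }
    atomic_store_explicit(&next->locked, false, memory_order_release);
}

Spinning only on node->locked keeps cache-line traffic local to each waiter, which is the property the qspinlock comments are describing.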
/linux-master/drivers/dma/fsl-dpaa2-qdma/
dpdmai.c
26 u8 queue; member in union:dpdmai_cmd_queue::__anon44
250 * dpdmai_set_rx_queue() - Set Rx queue configuration
254 * @queue_idx: DMA queue index
255 * @priority: Select the queue relative to number of
257 * @cfg: Rx queue configuration
286 * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
291 * @priority: Select the queue relative to number of
293 * @attr: Returned Rx queue attributes
309 cmd_params->queue = priority;
329 * dpdmai_get_tx_queue() - Retrieve Tx queue attribute
[all...]
/linux-master/drivers/soc/ti/
knav_qmss_queue.c
82 * knav_queue_notify: qmss queue notfier call
84 * @inst: - qmss queue instance like accumulator
117 unsigned queue = inst->id - range->queue_base; local
121 irq = range->irqs[queue].irq;
127 if (range->irqs[queue].cpu_mask) {
128 ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
142 unsigned queue = inst->id - inst->range->queue_base; local
146 irq = range->irqs[queue].irq;
389 unsigned queue; local
392 queue
[all...]
/linux-master/drivers/staging/media/tegra-video/
vi.c
103 * videobuf2 queue operations
154 /* put buffer into the capture queue */
156 list_add_tail(&buf->queue, &chan->capture);
246 list_for_each_entry_safe(buf, nbuf, &chan->capture, queue) {
248 list_del(&buf->queue);
253 list_for_each_entry_safe(buf, nbuf, &chan->done, queue) {
255 list_del(&buf->queue);
538 if (vb2_is_busy(&chan->queue))
672 if (vb2_is_busy(&chan->queue))
749 if (vb2_is_busy(&chan->queue))
[all...]
/linux-master/drivers/usb/gadget/udc/
mv_udc_core.c
216 /* Removed the req from fsl_ep->queue */
217 list_del_init(&req->queue);
266 if (!(list_empty(&ep->queue))) {
268 lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
500 /* Get the endpoint queue head address */
567 /* Get the endpoint queue head address */
605 INIT_LIST_HEAD(&req->queue);
692 || !list_empty(&req->queue)) {
718 /* build dtds and push them to device queue */
[all...]
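
The mv_udc_core.c hits (and several other results on this page, e.g. rtl871x_mlme.c and rtw_sta_mgt.c) queue requests with the kernel's intrusive struct list_head: INIT_LIST_HEAD(), list_add_tail(), list_del_init() and list_entry(). The self-contained sketch below re-implements just enough of that idiom to show how a request is linked into and removed from an endpoint queue; it uses simplified stand-ins, not the <linux/list.h> implementation.

/* Intrusive doubly linked list, modelled loosely on <linux/list.h>. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
    new->prev = head->prev;
    new->next = head;
    head->prev->next = new;
    head->prev = new;
}

static void list_del_init(struct list_head *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
    INIT_LIST_HEAD(entry);
}

/* A request queued on an endpoint, as in mv_udc's struct mv_req. */
struct req {
    int id;
    struct list_head queue;   /* links the request into the endpoint queue */
};

int main(void)
{
    struct list_head ep_queue = LIST_HEAD_INIT(ep_queue);
    struct req r1 = { .id = 1 }, r2 = { .id = 2 };

    list_add_tail(&r1.queue, &ep_queue);
    list_add_tail(&r2.queue, &ep_queue);

    /* cf. "lastreq = list_entry(ep->queue.prev, struct mv_req, queue);" */
    struct req *last = list_entry(ep_queue.prev, struct req, queue);
    printf("last queued request: %d\n", last->id);

    list_del_init(&r1.queue);
    list_del_init(&r2.queue);
    printf("queue empty after deletions: %d\n", list_empty(&ep_queue));
    return 0;
}

The list node lives inside the request structure, and list_entry() recovers the containing request from the node with offsetof(), which is why the same helpers work for any structure that embeds a list_head.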
/linux-master/block/
blk-throttle.c
3 * Interface for controlling IO bandwidth on a request queue
29 /* A workqueue to queue throttle related work */
52 struct request_queue *queue; member in struct:throtl_data
73 * sq_to_tg - return the throl_grp the specified service queue belongs to
88 * sq_to_td - return throtl_data the specified service queue belongs to
141 if (likely(!blk_trace_note_message_enabled(__td->queue))) \
144 blk_add_cgroup_trace_msg(__td->queue, \
147 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
435 /* Call with queue lock held */
1038 /* throtl_data may be gone, so figure out request queue b
[all...]
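
blk-throttle.c limits IO bandwidth on a request queue; bios that exceed the configured budget wait on a service queue until more budget accrues. The sketch below shows only that underlying budget idea as a simple token bucket with hypothetical names; the real code is hierarchical, works per cgroup, and accounts both bytes and IOs over time slices.

/* Token-bucket sketch of per-queue bandwidth throttling (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct throtl {
    uint64_t bps_limit;       /* allowed bytes per second */
    double   tokens;          /* bytes that may be dispatched right now */
    struct timespec last;     /* last refill time */
};

static double elapsed_sec(const struct timespec *a, const struct timespec *b)
{
    return (b->tv_sec - a->tv_sec) + (b->tv_nsec - a->tv_nsec) / 1e9;
}

/* Return true if an IO of 'bytes' may be dispatched now, false if it must
 * stay queued until enough budget has accumulated. */
static bool throtl_may_dispatch(struct throtl *t, uint64_t bytes)
{
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);

    t->tokens += elapsed_sec(&t->last, &now) * (double)t->bps_limit;
    if (t->tokens > (double)t->bps_limit)   /* cap the burst at ~1s worth */
        t->tokens = (double)t->bps_limit;
    t->last = now;

    if (t->tokens < (double)bytes)
        return false;
    t->tokens -= (double)bytes;
    return true;
}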
blk-mq-debugfs.c
129 * The "state" attribute is removed when the queue is removed. Don't
130 * allow setting the state on a dying queue to avoid a use-after-free.
367 blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
416 struct request_queue *q = hctx->queue;
433 struct request_queue *q = hctx->queue;
450 struct request_queue *q = hctx->queue;
467 struct request_queue *q = hctx->queue;
645 * until the queue is registered to a gendisk).
701 if (!hctx->queue->debugfs_dir)
770 lockdep_assert_held(&rqos->disk->queue
[all...]
/linux-master/drivers/char/ipmi/
kcs_bmc_cdev_ipmi.c
79 wait_queue_head_t queue; member in struct:kcs_bmc_ipmi
167 wake_up_interruptible(&priv->queue);
310 poll_wait(filp, &priv->queue, wait);
329 wait_event_interruptible(priv->queue,
484 init_waitqueue_head(&priv->queue);
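
The kcs_bmc_cdev_ipmi.c hits show the usual kernel wait-queue pattern: init_waitqueue_head() at setup, wait_event_interruptible() and poll_wait() on the read/poll paths, and wake_up_interruptible() when new data arrives. Below is a rough userspace analog of the same wait/wake pattern using a pthread mutex and condition variable; it sketches the concept, not the kernel API.

/* Wait/wake pattern analog (pthread sketch, hypothetical names). */
#include <pthread.h>
#include <stdbool.h>

struct event_queue {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool            data_ready;
};

static void event_queue_init(struct event_queue *q)
{
    pthread_mutex_init(&q->lock, NULL);
    pthread_cond_init(&q->cond, NULL);
    q->data_ready = false;
}

/* Block until data_ready becomes true (cf. wait_event_interruptible()). */
static void event_queue_wait(struct event_queue *q)
{
    pthread_mutex_lock(&q->lock);
    while (!q->data_ready)
        pthread_cond_wait(&q->cond, &q->lock);
    q->data_ready = false;
    pthread_mutex_unlock(&q->lock);
}

/* Mark data available and wake a waiter (cf. wake_up_interruptible()). */
static void event_queue_wake(struct event_queue *q)
{
    pthread_mutex_lock(&q->lock);
    q->data_ready = true;
    pthread_cond_signal(&q->cond);
    pthread_mutex_unlock(&q->lock);
}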
/linux-master/drivers/staging/rtl8712/
rtl871x_mlme.c
62 &(pmlmepriv->free_bss_pool.queue));
80 pnetwork = list_first_entry_or_null(&free_queue->queue,
108 list_add_tail(&pnetwork->list, &free_queue->queue);
123 list_add_tail(&pnetwork->list, &free_queue->queue);
141 phead = &scanned_queue->queue;
162 phead = &scanned_queue->queue;
263 phead = &scanned_queue->queue;
338 struct __queue *queue = &pmlmepriv->scanned_queue; local
342 phead = &queue->queue;
399 struct __queue *queue = &pmlmepriv->scanned_queue; local
1046 struct __queue *queue = NULL; local
[all...]
/linux-master/drivers/media/usb/uvc/
uvc_video.c
1276 struct uvc_video_queue *queue = &stream->queue; local
1281 mem = buf->mem + queue->buf_used;
1282 nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
1287 queue->buf_used += nbytes;
1408 *meta_buf = uvc_queue_next_buffer(&stream->meta.queue,
1411 *video_buf = uvc_queue_next_buffer(&stream->queue, *video_buf);
1505 * The buffer queue might have been cancelled while a bulk transfer
1562 if (buf->bytesused == stream->queue.buf_used ||
1564 if (buf->bytesused == stream->queue
1583 struct uvc_video_queue *queue = &stream->queue; local
[all...]
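
The uvc_video.c hits around line 1281 accumulate each incoming payload into the current capture buffer: copy min(len, remaining) bytes at offset buf_used, then advance buf_used until the frame is complete. A small sketch of that incremental-fill step, with hypothetical struct and function names:

/* Incremental fill of a capture buffer from variable-sized payloads. */
#include <stddef.h>
#include <string.h>

struct cap_buf {
    unsigned char *mem;   /* destination buffer */
    size_t length;        /* total capacity */
    size_t used;          /* bytes accumulated so far */
};

/* Append one payload; returns bytes actually copied (truncates at capacity). */
static size_t cap_buf_append(struct cap_buf *buf, const void *data, size_t len)
{
    size_t room = buf->length - buf->used;
    size_t nbytes = len < room ? len : room;

    memcpy(buf->mem + buf->used, data, nbytes);
    buf->used += nbytes;
    return nbytes;
}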
/linux-master/drivers/net/ethernet/hisilicon/hns/
hns_ae_adapt.c
106 dev_err(dsaf_dev->dev, "malloc queue fail!\n");
466 static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, argument
626 struct hnae_queue *queue; local
640 queue = handle->qs[idx];
641 hns_rcb_update_stats(queue);
643 tx_bytes += queue->tx_ring.stats.tx_bytes;
644 tx_packets += queue->tx_ring.stats.tx_pkts;
645 rx_bytes += queue->rx_ring.stats.rx_bytes;
646 rx_packets += queue->rx_ring.stats.rx_pkts;
648 rx_errors += queue
[all...]
/linux-master/net/sctp/
ulpqueue.c
40 /* Initialize a ULP queue from a block of memory. */
184 struct sk_buff_head *queue; local
213 queue = &sk->sk_receive_queue;
224 queue = &sp->pd_lobby;
227 queue = &sk->sk_receive_queue;
232 * can queue this to the receive queue instead
236 queue = &sk->sk_receive_queue;
238 queue = &sp->pd_lobby;
242 skb_queue_splice_tail_init(skb_list, queue);
312 sctp_make_reassembled_event(struct net *net, struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag) argument
[all...]
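
In ulpqueue.c the reassembled events are moved wholesale onto either the socket receive queue or the partial-delivery lobby with skb_queue_splice_tail_init(), which appends the entire source list and leaves it empty. A simplified sketch of that splice-tail-and-reinit operation on a plain FIFO (not sk_buff lists; names are illustrative):

/* Splice one FIFO onto the tail of another and reinitialise the source. */
#include <stddef.h>

struct msg {
    struct msg *next;
    int id;
};

struct fifo {
    struct msg *head;
    struct msg *tail;
};

static void fifo_init(struct fifo *q) { q->head = q->tail = NULL; }

/* Append the whole contents of 'src' to 'dst', leaving 'src' empty
 * (cf. skb_queue_splice_tail_init()). */
static void fifo_splice_tail_init(struct fifo *src, struct fifo *dst)
{
    if (!src->head)
        return;
    if (dst->tail)
        dst->tail->next = src->head;
    else
        dst->head = src->head;
    dst->tail = src->tail;
    fifo_init(src);
}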
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_process_queue_manager.c
39 (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
53 pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
107 pr_err("Queue id does not match any known queue\n");
122 /* Only allow one queue per process can have GWS assigned */
224 struct kfd_node *dev, struct queue **q,
261 pr_debug("PQM After init queue");
283 struct queue *q;
341 * check whether a SDMA queue can be allocated here, because
381 kq->queue->properties.queue_id = *qid;
392 WARN(1, "Invalid queue typ
[all...]
/linux-master/drivers/net/wireless/broadcom/b43/
pio.c
152 p->queue = q;
209 #define destroy_queue_tx(pio, queue) do { \
210 b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue)); \
211 (pio)->queue = NULL; \
214 #define destroy_queue_rx(pio, queue) do { \
215 b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue)); \
216 (pio)->queue = NULL; \
352 struct b43_pio_txqueue *q = pack->queue;
[all...]
/linux-master/net/mac80211/
util.c
286 struct ieee80211_txq *queue)
289 .sta = queue->sta,
294 skb = ieee80211_tx_dequeue(&local->hw, queue);
308 struct ieee80211_txq *queue; local
314 while ((queue = ieee80211_next_txq(hw, txq->ac))) {
315 wake_tx_push_queue(local, sdata, queue);
316 ieee80211_return_txq(hw, queue, false);
433 static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, argument
440 trace_wake_queue(local, queue, reason);
442 if (WARN_ON(queue >
284 wake_tx_push_queue(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_txq *queue) argument
479 ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason, bool refcounted) argument
491 ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) argument
499 __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason, bool refcounted) argument
518 ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason, bool refcounted) argument
530 ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) argument
544 int queue = info->hw_queue; local
566 int queue, i; local
618 ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) argument
[all...]
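
The util.c hits implement stop/wake of transmit queues by reason: a queue stopped for several reasons only resumes once every reason has been cleared. The sketch below models that as a per-queue bitmask with hypothetical names; the real mac80211 code also refcounts some reasons (the 'refcounted' parameter in the snippets) and handles the actual restart of pending frames separately.

/* Per-queue stop reasons as a bitmask (simplified sketch). */
#include <stdbool.h>

enum stop_reason {
    STOP_REASON_DRIVER = 0,
    STOP_REASON_PS     = 1,   /* powersave */
    STOP_REASON_FLUSH  = 2,
};

struct tx_queue {
    unsigned long stop_reasons;   /* bit n set => stopped for reason n */
};

static void queue_stop(struct tx_queue *q, enum stop_reason r)
{
    q->stop_reasons |= 1UL << r;
}

/* Clearing one reason only restarts the queue once no reason remains. */
static bool queue_wake(struct tx_queue *q, enum stop_reason r)
{
    q->stop_reasons &= ~(1UL << r);
    return q->stop_reasons == 0;   /* true: actually resume transmission */
}

static bool queue_stopped(const struct tx_queue *q)
{
    return q->stop_reasons != 0;
}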
/linux-master/drivers/gpu/drm/msm/
msm_drv.c
729 static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id, argument
735 if (fence_after(fence_id, queue->last_fence)) {
737 fence_id, queue->last_fence);
749 spin_lock(&queue->idr_lock);
750 fence = idr_find(&queue->fence_idr, fence_id);
753 spin_unlock(&queue->idr_lock);
778 struct msm_gpu_submitqueue *queue; local
789 queue = msm_submitqueue_get(file->driver_priv, args->queueid);
790 if (!queue)
793 ret = wait_fence(queue, arg
[all...]
/linux-master/net/802/
garp.c
264 skb_queue_tail(&app->queue, app->pdu);
272 while ((skb = skb_dequeue(&app->queue)))
305 goto queue;
307 goto queue;
312 goto queue;
319 queue:
596 skb_queue_head_init(&app->queue);
/linux-master/kernel/
watch_queue.c
2 /* Watch queue and general notification mechanism, built on pipes
30 MODULE_DESCRIPTION("Watch queue");
91 * Post a notification to a watch queue.
216 wqueue = rcu_dereference(watch->queue);
305 * Set the filter on a watch queue.
406 * @wqueue: The watch queue to unref.
418 put_watch_queue(rcu_access_pointer(watch->queue));
442 * @wqueue: The queue to assign.
444 * Initialise a watch and set the watch queue.
451 rcu_assign_pointer(watch->queue, wqueu
[all...]
/linux-master/drivers/s390/crypto/
zcrypt_cex4.c
107 * CCA queue additional device attributes
121 cca_get_info(AP_QID_CARD(zq->queue->qid),
122 AP_QID_QUEUE(zq->queue->qid),
332 * EP11 queue additional device attributes
349 ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
350 AP_QID_QUEUE(zq->queue->qid),
396 ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
397 AP_QID_QUEUE(zq->queue->qid),
645 * Probe function for CEX[45678] queue device. It always
678 zq->queue
[all...]
/linux-master/samples/v4l/
v4l2-pci-skeleton.c
46 * @queue: vb2 video capture queue
63 struct vb2_queue queue; member in struct:skeleton
147 * Setup the constraints of the queue: besides setting the number of planes
151 * queue and you need to have another available for userspace processing.
258 * Stop the DMA engine. Any remaining buffers in the DMA queue are dequeued
272 * The vb2 queue ops. Note that since q->lock is set we can use the standard
372 if (vb2_is_busy(&skel->queue))
419 if (vb2_is_busy(&skel->queue))
503 if (vb2_is_busy(&skel->queue))
[all...]
/linux-master/drivers/net/wireless/ath/ath5k/
mac80211-ops.c
575 unsigned int link_id, u16 queue,
582 if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
587 ath5k_hw_get_tx_queueprops(ah, queue, &qi);
595 "Configure tx [queue %d], "
597 queue, params->aifs, params->cw_min,
600 if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
602 "Unable to update hardware queue %u!\n", queue);
605 ath5k_hw_reset_tx_queue(ah, queue);
574 ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) argument
/linux-master/drivers/staging/rtl8723bs/core/
rtw_sta_mgt.c
22 INIT_LIST_HEAD(&psta->sleep_q.queue);
66 INIT_LIST_HEAD(&pstapriv->free_sta_queue.queue);
73 INIT_LIST_HEAD(&pstapriv->sleep_q.queue);
75 INIT_LIST_HEAD(&pstapriv->wakeup_q.queue);
191 if (list_empty(&pfree_sta_queue->queue)) {
196 psta = container_of(get_next(&pfree_sta_queue->queue), struct sta_info, list);
248 INIT_LIST_HEAD(&preorder_ctrl->pending_recvframe_queue.queue);
/linux-master/drivers/net/ethernet/mediatek/
mtk_ppe_offload.c
111 info->queue = path->mtk_wdma.queue;
192 int pse_port, dsa_port, queue; local
195 mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
231 queue = 3 + dsa_port;
233 queue = pse_port - 1;
235 mtk_foe_entry_set_queue(eth, foe, queue);
/linux-master/net/sched/
sch_cbs.c
72 int queue; member in struct:cbs_sched_data
267 cbs.queue = q->queue;
272 pr_warn("Couldn't disable CBS offload for queue %d\n",
273 cbs.queue);
289 cbs.queue = q->queue;
423 q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);

Completed in 516 milliseconds
