Searched refs:queue (Results 126 - 150 of 1368) sorted by last modified time

/linux-master/drivers/staging/octeon/
ethernet-mdio.c 69 pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
72 priv->port, priv->queue);
/linux-master/drivers/rpmsg/
rpmsg_char.c 50 * @queue_lock: synchronization of @queue operations
51 * @queue: incoming message queue
52 * @readq: wait object for incoming queue
70 struct sk_buff_head queue; member in struct:rpmsg_eptdev
114 skb_queue_tail(&eptdev->queue, skb);
195 skb_queue_purge(&eptdev->queue);
215 /* Wait for data in the queue */
216 if (skb_queue_empty(&eptdev->queue)) {
224 !skb_queue_empty(&eptdev->queue) ||
[all...]
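
The rpmsg_char.c hits above show a common consumer shape: received messages are appended to a locked queue (skb_queue_tail() under @queue_lock) and readers sleep on @readq until the queue is non-empty. Below is a minimal userspace sketch of that queue-plus-wait pattern using pthreads (build with -pthread); msg_queue, queue_tail and queue_read are illustrative names, not the kernel sk_buff or wait-queue API.

#include <pthread.h>
#include <stdio.h>

#define QLEN 16

struct msg_queue {
	pthread_mutex_t lock;   /* plays the role of @queue_lock */
	pthread_cond_t  readq;  /* plays the role of @readq */
	int buf[QLEN];          /* stands in for the skb list */
	int head, tail, count;
};

static struct msg_queue q = {
	.lock  = PTHREAD_MUTEX_INITIALIZER,
	.readq = PTHREAD_COND_INITIALIZER,
};

static void queue_tail(struct msg_queue *mq, int msg)
{
	pthread_mutex_lock(&mq->lock);
	if (mq->count < QLEN) {
		mq->buf[mq->tail] = msg;
		mq->tail = (mq->tail + 1) % QLEN;
		mq->count++;
	}
	pthread_cond_signal(&mq->readq);   /* wake a sleeping reader */
	pthread_mutex_unlock(&mq->lock);
}

static int queue_read(struct msg_queue *mq)
{
	int msg;

	pthread_mutex_lock(&mq->lock);
	while (mq->count == 0)             /* "wait for data in the queue" */
		pthread_cond_wait(&mq->readq, &mq->lock);
	msg = mq->buf[mq->head];
	mq->head = (mq->head + 1) % QLEN;
	mq->count--;
	pthread_mutex_unlock(&mq->lock);
	return msg;
}

int main(void)
{
	queue_tail(&q, 42);
	printf("read %d\n", queue_read(&q));
	return 0;
}
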
/linux-master/drivers/nvme/host/
pr.c 62 ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
72 return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
apple.c 68 * admin and the IO queue share the same tag space. Additionally, tags
70 * queue depth to 0x40. Instead of wasting half of that on the admin queue
92 * admin queue): Those commands must still be added to the NVMMU but the DMA
95 * Programming the PRPs to the same values as those in the submission queue
98 * In that setting the NVMe driver first programs the submission queue entry
103 * Since Linux doesn't do any of that we may as well just point both the queue
125 * The Apple NVMe controller only supports a single admin and a single IO queue
128 * The completion queue works as usual. The submission "queue" instea
[all...]
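
The apple.c comment above describes a controller where the admin and IO queues share one tag space with a maximum depth of 0x40, so the driver has to carve tags out of that single range rather than give each queue its own. A toy illustration of that constraint follows; the 8/56 split used here is made up for the example and is not taken from the driver.

#include <stdio.h>

#define TAG_SPACE_DEPTH 0x40              /* shared depth from the comment */
#define ADMIN_TAGS      8                 /* illustrative split only */
#define IO_TAGS         (TAG_SPACE_DEPTH - ADMIN_TAGS)

static int admin_tag(int i) { return i; }              /* 0 .. ADMIN_TAGS-1 */
static int io_tag(int i)    { return ADMIN_TAGS + i; } /* rest of the space */

int main(void)
{
	printf("admin tags %d..%d, io tags %d..%d\n",
	       admin_tag(0), admin_tag(ADMIN_TAGS - 1),
	       io_tag(0), io_tag(IO_TAGS - 1));
	return 0;
}
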
/linux-master/drivers/net/wireguard/
receive.c 181 * packets in the queue and not send a keepalive, which
183 * queue, it will send a keepalive, in order to give
193 WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n");
208 struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr; local
209 struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
212 while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
495 struct crypt_queue *queue = container_of(work, struct multicore_worker, local
499 while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
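
The receive.c hits show the worker shape used throughout WireGuard: container_of() recovers the crypt_queue from the work item, then the handler drains the ring with ptr_ring_consume_bh() until it returns NULL. Below is a single-threaded stand-in for that drain loop; toy_ring and ring_consume are illustrative and this is not the kernel ptr_ring API.

#include <stdio.h>

#define RING_SIZE 8

struct toy_ring {
	void *slots[RING_SIZE];
	unsigned int head, tail;
};

static void *ring_consume(struct toy_ring *r)
{
	void *entry;

	if (r->head == r->tail)
		return NULL;                    /* ring drained: stop */
	entry = r->slots[r->head % RING_SIZE];
	r->head++;
	return entry;
}

static void worker(struct toy_ring *queue)
{
	void *pkt;

	/* Mirrors: while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) */
	while ((pkt = ring_consume(queue)) != NULL)
		printf("handled packet %s\n", (char *)pkt);
}

int main(void)
{
	struct toy_ring ring = { .slots = { "a", "b", "c" }, .tail = 3 };

	worker(&ring);
	return 0;
}
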
/linux-master/drivers/net/ethernet/mediatek/
mtk_ppe.c 465 unsigned int queue)
471 *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
475 *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
464 mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry, unsigned int queue) argument
mtk_eth_soc.c 1410 int queue = skb_get_queue_mapping(skb); local
1413 txq = netdev_get_tx_queue(dev, queue);
1461 txd_info.qid = queue;
1606 "Tx Ring full when queue awake!\n");
3982 /* PSE config input queue threshold */
3992 /* PSE config output queue threshold */
4434 unsigned int queue = 0; local
4437 queue = skb_get_queue_mapping(skb) + 3;
4439 queue = mac->id;
4441 if (queue >
[all...]
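
The MediaTek hits pack a queue id into a hardware word with FIELD_PREP(MTK_FOE_IB2_QID..., queue) and pick the TX ring from skb_get_queue_mapping(). The sketch below re-creates the FIELD_PREP() idea in plain C with a made-up mask (the real QID masks live in mtk_ppe.h); it relies on the GCC/Clang __builtin_ctz().

#include <stdint.h>
#include <stdio.h>

#define TOY_QID_MASK 0x7f00u   /* illustrative 7-bit queue-id field */

/* Minimal re-creation of the FIELD_PREP() idea: shift a value into the
 * position described by a contiguous bitmask. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t ib2 = 0;
	unsigned int queue = 5;

	ib2 &= ~TOY_QID_MASK;                    /* clear the old queue id */
	ib2 |= field_prep(TOY_QID_MASK, queue);  /* insert the new one */
	printf("ib2 = 0x%08x\n", ib2);
	return 0;
}
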
/linux-master/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_cmn.h 121 * bnx2x_setup_queue - setup eth queue.
132 * bnx2x_setup_leading - bring up a leading eth queue.
496 int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
532 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1200 BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
/linux-master/drivers/mtd/ubi/
block.c 421 dev->rq = gd->queue;
456 /* Finally destroy the blk queue */
/linux-master/drivers/iio/buffer/
industrialio-buffer-dmaengine.c 33 struct iio_dma_buffer_queue queue; member in struct:dmaengine_buffer
45 return container_of(buffer, struct dmaengine_buffer, queue.buffer);
54 spin_lock_irqsave(&block->queue->list_lock, flags);
56 spin_unlock_irqrestore(&block->queue->list_lock, flags);
61 static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue, argument
65 iio_buffer_to_dmaengine_buffer(&queue->buffer);
86 spin_lock_irq(&dmaengine_buffer->queue.list_lock);
88 spin_unlock_irq(&dmaengine_buffer->queue.list_lock);
95 static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue) argument
98 iio_buffer_to_dmaengine_buffer(&queue
[all...]
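
industrialio-buffer-dmaengine.c embeds a struct iio_dma_buffer_queue inside struct dmaengine_buffer and uses container_of() to get from &queue->buffer back to the outer buffer. A self-contained illustration of that embed-and-recover pattern, with toy types standing in for the IIO ones:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the outer struct
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_queue { int depth; };          /* stands in for iio_dma_buffer_queue */

struct toy_dmaengine_buffer {
	const char *chan_name;
	struct toy_queue queue;           /* embedded, as in the driver */
};

static struct toy_dmaengine_buffer *to_dmaengine_buffer(struct toy_queue *q)
{
	return container_of(q, struct toy_dmaengine_buffer, queue);
}

int main(void)
{
	struct toy_dmaengine_buffer b = { .chan_name = "rx", .queue = { 8 } };
	struct toy_queue *q = &b.queue;

	printf("%s depth=%d\n", to_dmaengine_buffer(q)->chan_name, q->depth);
	return 0;
}
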
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_gfx.c 44 int pipe, int queue)
51 bit += queue;
57 int *mec, int *pipe, int *queue)
59 *queue = bit % adev->gfx.mec.num_queue_per_pipe;
68 int xcc_id, int mec, int pipe, int queue)
70 return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
75 int me, int pipe, int queue)
82 bit += queue;
88 int *me, int *pipe, int *queue)
90 *queue
43 amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, int pipe, int queue) argument
56 amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue) argument
67 amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id, int mec, int pipe, int queue) argument
74 amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, int pipe, int queue) argument
87 amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, int *me, int *pipe, int *queue) argument
97 amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue) argument
176 int queue = ring->queue; local
210 int i, j, queue, pipe; local
246 int i, queue, pipe; local
276 int mec, pipe, queue; local
591 int mec, pipe, queue; local
[all...]
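
The amdgpu_gfx.c hits convert between a (mec, pipe, queue) triple and a flat bit index in the queue bitmap: multiply-and-add one way, mod-and-divide the other. A runnable sketch of that arithmetic with made-up per-pipe and per-MEC counts (the driver reads the real ones from adev->gfx.mec at runtime):

#include <stdio.h>

#define QUEUES_PER_PIPE 8   /* illustrative sizes only */
#define PIPES_PER_MEC   4

/* Flatten (mec, pipe, queue) into a single bit index, mirroring the
 * arithmetic visible in the hits above. */
static int mec_queue_to_bit(int mec, int pipe, int queue)
{
	return mec * PIPES_PER_MEC * QUEUES_PER_PIPE
	     + pipe * QUEUES_PER_PIPE
	     + queue;
}

/* And the inverse: recover (mec, pipe, queue) from the bit index. */
static void bit_to_mec_queue(int bit, int *mec, int *pipe, int *queue)
{
	*queue = bit % QUEUES_PER_PIPE;
	*pipe  = (bit / QUEUES_PER_PIPE) % PIPES_PER_MEC;
	*mec   =  bit / (QUEUES_PER_PIPE * PIPES_PER_MEC);
}

int main(void)
{
	int mec, pipe, queue;
	int bit = mec_queue_to_bit(1, 2, 3);

	bit_to_mec_queue(bit, &mec, &pipe, &queue);
	printf("bit=%d -> mec=%d pipe=%d queue=%d\n", bit, mec, pipe, queue);
	return 0;
}
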
/linux-master/drivers/s390/virtio/
virtio_ccw.c 119 dma64_t queue; member in struct:vq_info_block_legacy
488 info->info_block->l.queue = 0;
511 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
548 u64 queue; local
557 /* Allocate queue. */
595 queue = virtqueue_get_desc_addr(vq);
597 info->info_block->l.queue = u64_to_dma64(queue);
603 info->info_block->s.desc = u64_to_dma64(queue);
739 /* Register queue indicator
[all...]
/linux-master/drivers/s390/crypto/
zcrypt_api.c 568 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue) argument
570 return test_bit_inv(queue, perms->aqm) ? true : false;
578 if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
582 get_device(&zq->queue->ap_dev.device);
586 *pmod = zq->queue->ap_dev.device.driver->owner;
598 put_device(&zq->queue->ap_dev.device);
629 return zq->queue->total_request_count <
630 pref_zq->queue->total_request_count;
696 !ap_queue_usable(zq->queue))
698 /* check if device node has admission for this queue */
1274 int card, queue; local
1301 int card, queue; local
1322 zcrypt_device_status_ext(int card, int queue, struct zcrypt_device_status_ext *devstat) argument
[all...]
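
zcrypt_api.c first checks the caller's admission bitmap for the queue (test_bit_inv() on perms->aqm) and then prefers the queue with the lowest total_request_count. A toy version of that pick-the-least-loaded-permitted-queue selection; the field names are illustrative, not the zcrypt structures.

#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	bool usable;                        /* device is up and bound */
	bool permitted;                     /* plays the role of the aqm test */
	unsigned long total_request_count;  /* outstanding load */
};

static struct toy_queue *pick_queue(struct toy_queue *q, int n)
{
	struct toy_queue *pref = NULL;

	for (int i = 0; i < n; i++) {
		if (!q[i].usable || !q[i].permitted)
			continue;
		if (!pref || q[i].total_request_count < pref->total_request_count)
			pref = &q[i];
	}
	return pref;
}

int main(void)
{
	struct toy_queue qs[] = {
		{ true, true, 12 }, { true, false, 1 }, { true, true, 7 },
	};

	printf("picked queue with %lu pending requests\n",
	       pick_queue(qs, 3)->total_request_count);
	return 0;
}
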
/linux-master/drivers/s390/cio/
qdio_setup.c 3 * qdio queue initialization
147 /* queue must be cleared for qdio_establish */
292 static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue) argument
294 desc->sliba = virt_to_dma64(queue->slib);
295 desc->sla = virt_to_dma64(queue->sl);
296 desc->slsba = virt_to_dma64(&queue->slsb);
/linux-master/drivers/s390/block/
scm_blk.c 286 struct scm_device *scmdev = hctx->queue->queuedata;
477 rq = bdev->rq = bdev->gendisk->queue;
dcssblk.c 646 blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
dasd.c 331 lim = queue_limits_start_update(block->gdp->queue);
348 rc = queue_limits_commit_update(block->gdp->queue, &lim);
413 * the requeueing of requests from the linux request queue to the
414 * ccw queue.
568 /* queue call to dasd_kick_device to the kernel event daemon. */
589 /* queue call to dasd_reload_device to the kernel event daemon. */
1487 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
1497 /* re-activate request queue */
1575 blk_mq_run_hw_queues(device->block->gdp->queue, true);
1788 /* Start first request on queue i
[all...]
/linux-master/sound/core/
timer.c 79 struct snd_timer_read *queue; member in struct:snd_timer_user
95 unsigned int overrun; /* count of read queue overruns */
96 unsigned int queue; /* used queue size */ member in struct:snd_timer_status32
107 unsigned int overrun; /* count of read queue overruns */
108 unsigned int queue; /* used queue size */ member in struct:snd_timer_status64
1286 r = &tu->queue[prev];
1295 r = &tu->queue[tu->qtail++];
1411 struct snd_timer_read *queue local
[all...]
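
The sound/core/timer.c hits show a fixed-size read queue indexed by qtail plus an overrun counter for the case where timer events arrive faster than the reader drains them. A small stand-in for that bookkeeping follows; sizes and names are made up, and the real code's coalescing of adjacent events is omitted.

#include <stdio.h>

#define TQ_SIZE 4

struct toy_timer_user {
	int queue[TQ_SIZE];      /* stands in for the snd_timer_read array */
	int qtail, qused;
	unsigned int overrun;    /* "count of read queue overruns" */
};

static void push_event(struct toy_timer_user *tu, int ev)
{
	if (tu->qused >= TQ_SIZE) {
		tu->overrun++;                  /* reader too slow: drop */
		return;
	}
	tu->queue[tu->qtail] = ev;
	tu->qtail = (tu->qtail + 1) % TQ_SIZE;
	tu->qused++;
}

int main(void)
{
	struct toy_timer_user tu = { 0 };

	for (int i = 0; i < 6; i++)
		push_event(&tu, i);
	printf("queued=%d overruns=%u\n", tu.qused, tu.overrun);
	return 0;
}
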
/linux-master/include/trace/events/
sunrpc.h 446 " flags=%s runstate=%s status=%d timeout=%lu queue=%s",
2202 DEFINE_SVC_DEFERRED_EVENT(queue); variable
/linux-master/fs/nfsd/
trace.h 1498 DEFINE_NFSD_CB_LIFETIME_EVENT(queue); variable
/linux-master/drivers/virtio/
virtio_ring.c 90 /* Actual memory layout for this queue. */
119 /* Actual memory layout for this queue. */
319 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag); local
321 if (queue) {
322 phys_addr_t phys_addr = virt_to_phys(queue);
337 free_pages_exact(queue, PAGE_ALIGN(size));
341 return queue;
346 void *queue, dma_addr_t dma_handle,
350 dma_free_coherent(dma_dev, size, queue, dma_handle);
352 free_pages_exact(queue, PAGE_ALIG
345 vring_free_queue(struct virtio_device *vdev, size_t size, void *queue, dma_addr_t dma_handle, struct device *dma_dev) argument
1097 void *queue = NULL; local
[all...]
/linux-master/drivers/vhost/
net.c 103 void **queue; member in struct:vhost_net_buf
153 return rxq->queue[rxq->head];
180 rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
190 ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
1266 void **queue; local
1279 queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
1281 if (!queue) {
1286 n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
1292 kfree(queue);
[all...]
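
vhost/net.c keeps a small cache (vhost_net_buf) in front of the ptr_ring: ptr_ring_consume_batched() refills queue[] in one call, and the head/tail indices hand entries out one at a time. A self-contained model of that batched-refill cache; the refill source is faked here so the example runs on its own, and the names are illustrative.

#include <stdio.h>

#define BATCH 4

struct toy_rxq {
	int queue[BATCH];   /* stands in for the void **queue cache */
	int head, tail;
};

/* Stand-in for ptr_ring_consume_batched(): refill from a counter so the
 * example is self-contained. */
static int fake_ring_consume_batched(int *dst, int n)
{
	static int next;

	for (int i = 0; i < n; i++)
		dst[i] = next++;
	return n;
}

static int rxq_next(struct toy_rxq *rxq)
{
	if (rxq->head == rxq->tail) {          /* cache empty: refill it */
		rxq->head = 0;
		rxq->tail = fake_ring_consume_batched(rxq->queue, BATCH);
	}
	return rxq->queue[rxq->head++];
}

int main(void)
{
	struct toy_rxq rxq = { 0 };

	for (int i = 0; i < 6; i++)
		printf("%d ", rxq_next(&rxq));
	printf("\n");
	return 0;
}
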
/linux-master/drivers/infiniband/sw/rxe/
rxe_verbs.c 478 attr->max_wr = srq->rq.queue->buf->index_mask;
871 full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
873 rxe_err_qp(qp, "send queue full\n");
877 send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
880 queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);
959 full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
962 rxe_dbg("queue full\n");
983 recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
994 queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
1154 cqe = queue_head(cq->queue, QUEUE_TYPE_TO_UL
[all...]
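
The rxe_verbs.c hits show the producer side of the software work queue: check queue_full(), write the WQE at the slot returned by queue_producer_addr(), then queue_advance_producer(). Below is a toy index-mask ring doing the same full-check/write/advance sequence; it is a sketch of the pattern, not the rxe queue implementation, and one slot is kept empty to tell "full" from "empty".

#include <stdbool.h>
#include <stdio.h>

#define WQ_DEPTH 8   /* power of two so index_mask works */

struct toy_wq {
	int buf[WQ_DEPTH];
	unsigned int producer, consumer;
	unsigned int index_mask;          /* WQ_DEPTH - 1 */
};

static bool queue_full(struct toy_wq *q)
{
	return ((q->producer + 1) & q->index_mask) ==
	       (q->consumer & q->index_mask);
}

static int post_send(struct toy_wq *q, int wqe)
{
	if (queue_full(q))
		return -1;                        /* "send queue full" */
	q->buf[q->producer & q->index_mask] = wqe;
	q->producer++;                            /* advance the producer */
	return 0;
}

int main(void)
{
	struct toy_wq q = { .index_mask = WQ_DEPTH - 1 };
	int posted = 0;

	while (post_send(&q, posted) == 0)
		posted++;
	printf("posted %d work requests before the queue filled\n", posted);
	return 0;
}
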
rxe_resp.c 49 /* rxe_recv calls here to add a request packet to the input queue */
272 struct rxe_queue *q = srq->rq.queue;
328 * recycle the responder resource queue
340 qp->resp.wqe = queue_head(qp->rq.queue,
1142 queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);
1413 /* drain incoming request packet queue */
1450 /* drain and optionally complete the recive queue
1456 struct rxe_queue *q = qp->rq.queue;
1472 /* recv queue not created. nothing to do. */
1473 if (!qp->rq.queue)
[all...]
rxe_qp.c 201 qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
203 if (!qp->sq.queue) {
204 rxe_err_qp(qp, "Unable to allocate send queue\n");
209 /* prepare info for caller to mmap send queue if user space qp */
211 qp->sq.queue->buf, qp->sq.queue->buf_size,
212 &qp->sq.queue->ip);
228 vfree(qp->sq.queue->buf);
229 kfree(qp->sq.queue);
230 qp->sq.queue
[all...]

Completed in 278 milliseconds
