Searched refs:queue (Results 1 - 25 of 1372) sorted by relevance


/linux-master/drivers/md/dm-vdo/
funnel-queue.c
6 #include "funnel-queue.h"
15 struct funnel_queue *queue; local
17 result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue);
22 * Initialize the stub entry and put it in the queue, establishing the invariant that
23 * queue->newest and queue->oldest are never null.
25 queue->stub.next = NULL;
26 queue->newest = &queue
33 vdo_free_funnel_queue(struct funnel_queue *queue) argument
38 get_oldest(struct funnel_queue *queue) argument
103 vdo_funnel_queue_poll(struct funnel_queue *queue) argument
137 vdo_is_funnel_queue_empty(struct funnel_queue *queue) argument
148 vdo_is_funnel_queue_idle(struct funnel_queue *queue) argument
[all...]
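
The funnel-queue.c comments above describe the invariant behind VDO's funnel queue, a lockless multi-producer, single-consumer (MPSC) queue: a permanently embedded stub entry guarantees that newest and oldest are never NULL, even when the queue is logically empty. A minimal standalone sketch of that initialization, assuming hypothetical mpsc_queue/mpsc_entry types rather than the kernel's structures:

#include <stdatomic.h>
#include <stddef.h>

struct mpsc_entry {
	struct mpsc_entry *next;
};

struct mpsc_queue {
	_Atomic(struct mpsc_entry *) newest; /* producers publish here */
	struct mpsc_entry *oldest;           /* consumer walks next links */
	struct mpsc_entry stub;              /* placeholder, never handed out */
};

static void mpsc_queue_init(struct mpsc_queue *queue)
{
	queue->stub.next = NULL;
	/* Point both ends at the stub: the "never NULL" invariant. */
	atomic_store(&queue->newest, &queue->stub);
	queue->oldest = &queue->stub;
}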
funnel-workqueue.c
15 #include "funnel-queue.h"
28 * DOC: Work queue definition.
36 /* Name of just the work queue (e.g., "cpuQ12") */
73 static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue) argument
75 return ((queue == NULL) ?
76 NULL : container_of(queue, struct simple_work_queue, common));
79 static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue) argument
81 return ((queue == NULL) ?
83 container_of(queue, struct round_robin_work_queue, common));
96 static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue) argument
110 enqueue_work_queue_completion(struct simple_work_queue *queue, struct vdo_completion *completion) argument
153 run_start_hook(struct simple_work_queue *queue) argument
159 run_finish_hook(struct simple_work_queue *queue) argument
174 wait_for_next_completion(struct simple_work_queue *queue) argument
222 process_completion(struct simple_work_queue *queue, struct vdo_completion *completion) argument
233 service_work_queue(struct simple_work_queue *queue) argument
264 struct simple_work_queue *queue = ptr; local
273 free_simple_work_queue(struct simple_work_queue *queue) argument
283 free_round_robin_work_queue(struct round_robin_work_queue *queue) argument
298 vdo_free_work_queue(struct vdo_work_queue *queue) argument
317 struct simple_work_queue *queue; local
386 struct round_robin_work_queue *queue; local
444 finish_simple_work_queue(struct simple_work_queue *queue) argument
454 finish_round_robin_work_queue(struct round_robin_work_queue *queue) argument
465 vdo_finish_work_queue(struct vdo_work_queue *queue) argument
478 dump_simple_work_queue(struct simple_work_queue *queue) argument
499 vdo_dump_work_queue(struct vdo_work_queue *queue) argument
556 vdo_enqueue_work_queue(struct vdo_work_queue *queue, struct vdo_completion *completion) argument
612 struct simple_work_queue *queue = get_current_thread_work_queue(); local
617 vdo_get_work_queue_owner(struct vdo_work_queue *queue) argument
629 struct simple_work_queue *queue = get_current_thread_work_queue(); local
634 vdo_work_queue_type_is(struct vdo_work_queue *queue, const struct vdo_work_queue_type *type) argument
[all...]
/linux-master/drivers/md/dm-vdo/indexer/
funnel-requestqueue.c
12 #include "funnel-queue.h"
18 * This queue will attempt to handle requests in reasonably sized batches instead of reacting
22 * If the wait time becomes long enough, the queue will become dormant and must be explicitly
24 * queue via xchg (which is a memory barrier), and later checks "dormant" to decide whether to do a
28 * decide if the funnel queue is idle. In dormant mode, the last examination of "newest" before
31 * queue's "next" field update isn't visible yet to make the entry accessible, its existence will
35 * the queue to awaken immediately.
50 /* Wait queue for synchronizing producers and consumer */
68 static inline struct uds_request *poll_queues(struct uds_request_queue *queue) argument
72 entry = vdo_funnel_queue_poll(queue
83 are_queues_idle(struct uds_request_queue *queue) argument
94 dequeue_request(struct uds_request_queue *queue, struct uds_request **request_ptr, bool *waited_ptr) argument
115 wait_for_request(struct uds_request_queue *queue, bool dormant, unsigned long timeout, struct uds_request **request, bool *waited) argument
133 struct uds_request_queue *queue = arg; local
199 struct uds_request_queue *queue; local
234 wake_up_worker(struct uds_request_queue *queue) argument
240 uds_request_queue_enqueue(struct uds_request_queue *queue, struct uds_request *request) argument
257 uds_request_queue_finish(struct uds_request_queue *queue) argument
[all...]
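
The funnel-requestqueue.c comment above describes the dormant-mode handshake: a producer publishes its entry with an xchg, which is also a full memory barrier, and only afterwards reads the dormant flag to decide whether the sleeping consumer needs a wakeup. A hedged userspace C11 sketch of the producer side, with hypothetical names and a stub standing in for the kernel's wake_up() call; the queue's newest pointer is assumed to have been seeded with a stub entry as in the previous sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct rq_entry {
	struct rq_entry *next;
};

struct request_queue_sketch {
	_Atomic(struct rq_entry *) newest; /* funnel-queue publish point */
	atomic_bool dormant;               /* consumer parked after long idle */
};

static void wake_consumer(struct request_queue_sketch *q)
{
	(void)q; /* the kernel would wake_up() the worker's wait queue here */
}

static void enqueue_request(struct request_queue_sketch *q,
			    struct rq_entry *entry)
{
	entry->next = NULL;
	/* Publish via exchange: sequentially consistent, so the later read
	 * of dormant cannot be reordered before the entry becomes visible. */
	struct rq_entry *prev = atomic_exchange(&q->newest, entry);
	prev->next = entry;

	/* Cheap check in the common case; wake only a dormant consumer. */
	if (atomic_load(&q->dormant))
		wake_consumer(q);
}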
funnel-requestqueue.h
12 * A simple request queue which will handle new requests in the order in which they are received,
26 void uds_request_queue_enqueue(struct uds_request_queue *queue,
29 void uds_request_queue_finish(struct uds_request_queue *queue);
/linux-master/drivers/net/wireguard/
queueing.c
25 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, argument
30 memset(queue, 0, sizeof(*queue));
31 queue->last_cpu = -1;
32 ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
35 queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
36 if (!queue->worker) {
37 ptr_ring_cleanup(&queue->ring, NULL);
43 void wg_packet_queue_free(struct crypt_queue *queue, boo argument
53 wg_prev_queue_init(struct prev_queue *queue) argument
66 __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) argument
72 wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) argument
80 wg_prev_queue_dequeue(struct prev_queue *queue) argument
[all...]
/linux-master/drivers/scsi/arm/
queue.h
3 * linux/drivers/acorn/scsi/queue.h: queue handling
18 * Function: void queue_initialise (Queue_t *queue)
19 * Purpose : initialise a queue
20 * Params : queue - queue to initialise
22 extern int queue_initialise (Queue_t *queue);
25 * Function: void queue_free (Queue_t *queue)
26 * Purpose : free a queue
27 * Params : queue
[all...]
queue.c
3 * linux/drivers/acorn/scsi/queue.c: queue handling primitives
50 #include "queue.h"
55 * Function: void queue_initialise (Queue_t *queue)
56 * Purpose : initialise a queue
57 * Params : queue - queue to initialise
59 int queue_initialise (Queue_t *queue) argument
64 spin_lock_init(&queue->queue_lock);
65 INIT_LIST_HEAD(&queue
91 queue_free(Queue_t *queue) argument
107 __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) argument
138 __queue_remove(Queue_t *queue, struct list_head *ent) argument
162 queue_remove_exclude(Queue_t *queue, unsigned long *exclude) argument
188 queue_remove(Queue_t *queue) argument
210 queue_remove_tgtluntag(Queue_t *queue, int target, int lun, int tag) argument
238 queue_remove_all_target(Queue_t *queue, int target) argument
261 queue_probetgtlun(Queue_t *queue, int target, int lun) argument
287 queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt) argument
[all...]
/linux-master/include/drm/
spsc_queue.h
30 /** SPSC lockless queue */
48 static inline void spsc_queue_init(struct spsc_queue *queue) argument
50 queue->head = NULL;
51 atomic_long_set(&queue->tail, (long)&queue->head);
52 atomic_set(&queue->job_count, 0);
55 static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) argument
57 return queue->head;
60 static inline int spsc_queue_count(struct spsc_queue *queue) argument
62 return atomic_read(&queue
65 spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) argument
89 spsc_queue_pop(struct spsc_queue *queue) argument
[all...]
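
spsc_queue_init above seeds tail with the address of head, so a push can always write through the previous tail without testing for emptiness. A simplified C11 sketch of that push; the real header uses atomic_long_xchg, WRITE_ONCE and explicit barriers, while this sketch leans on sequentially consistent atomics:

#include <stdatomic.h>
#include <stddef.h>

struct spsc_node_s {
	struct spsc_node_s *next;
};

struct spsc_queue_s {
	struct spsc_node_s *head;
	_Atomic(struct spsc_node_s **) tail; /* address of last next pointer */
	atomic_int job_count;
};

static void spsc_queue_init_sketch(struct spsc_queue_s *q)
{
	q->head = NULL;
	atomic_store(&q->tail, &q->head); /* empty: tail points at head */
	atomic_store(&q->job_count, 0);
}

static void spsc_queue_push_sketch(struct spsc_queue_s *q,
				   struct spsc_node_s *node)
{
	node->next = NULL;
	/* Claim the old tail slot atomically, then link through it. */
	struct spsc_node_s **prev_next = atomic_exchange(&q->tail, &node->next);
	*prev_next = node;
	atomic_fetch_add(&q->job_count, 1);
}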
/linux-master/drivers/media/usb/uvc/
uvc_queue.c
24 * Video buffers queue management.
30 * the videobuf2 queue operations by serializing calls to videobuf2 and a
31 * spinlock to protect the IRQ queue that holds the buffers to be processed by
43 * This function must be called with the queue spinlock held.
45 static void uvc_queue_return_buffers(struct uvc_video_queue *queue, argument
52 while (!list_empty(&queue->irqqueue)) {
53 struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
55 queue);
56 list_del(&buf->queue);
63 * videobuf2 queue operation
70 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); local
101 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); local
129 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); local
152 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); local
162 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); local
183 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); local
215 uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, int drop_corrupted) argument
251 uvc_queue_release(struct uvc_video_queue *queue) argument
262 uvc_request_buffers(struct uvc_video_queue *queue, struct v4l2_requestbuffers *rb) argument
274 uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) argument
285 uvc_create_buffers(struct uvc_video_queue *queue, struct v4l2_create_buffers *cb) argument
297 uvc_queue_buffer(struct uvc_video_queue *queue, struct media_device *mdev, struct v4l2_buffer *buf) argument
309 uvc_export_buffer(struct uvc_video_queue *queue, struct v4l2_exportbuffer *exp) argument
321 uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf, int nonblocking) argument
333 uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type) argument
344 uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type) argument
355 uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) argument
361 uvc_queue_get_unmapped_area(struct uvc_video_queue *queue, unsigned long pgoff) argument
368 uvc_queue_poll(struct uvc_video_queue *queue, struct file *file, poll_table *wait) argument
387 uvc_queue_allocated(struct uvc_video_queue *queue) argument
410 uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect) argument
435 __uvc_queue_get_current_buffer(struct uvc_video_queue *queue) argument
443 uvc_queue_get_current_buffer(struct uvc_video_queue *queue) argument
462 uvc_queue_buffer_requeue(struct uvc_video_queue *queue, struct uvc_buffer *buf) argument
477 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); local
503 uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf) argument
[all...]
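
The uvc_queue.c comment above notes that a spinlock protects the IRQ queue of buffers owned by the driver, and uvc_queue_return_buffers drains that list entry by entry. A hedged userspace approximation with a pthread spinlock and a singly linked list in place of the kernel's list_head; unlike the kernel helper, which expects its caller to hold the lock, this sketch takes the lock itself (all names hypothetical):

#include <pthread.h>
#include <stddef.h>

enum buf_state { BUF_QUEUED, BUF_DONE, BUF_ERROR };

struct video_buf {
	struct video_buf *next;
	enum buf_state state;
};

struct video_queue_sketch {
	pthread_spinlock_t irqlock;
	struct video_buf *irqqueue; /* buffers waiting on the IRQ path */
};

/* Hand every pending buffer back with the given state, e.g. BUF_ERROR
 * when streaming is cancelled. */
static void queue_return_buffers(struct video_queue_sketch *q,
				 enum buf_state state)
{
	pthread_spin_lock(&q->irqlock);
	while (q->irqqueue) {
		struct video_buf *buf = q->irqqueue;
		q->irqqueue = buf->next; /* list_del() equivalent */
		buf->state = state;      /* vb2_buffer_done() equivalent */
	}
	pthread_spin_unlock(&q->irqlock);
}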
/linux-master/drivers/usb/gadget/function/
uvc_queue.c
26 * Video buffers queue management.
32 * the videobuf2 queue operations by serializing calls to videobuf2 and a
33 * spinlock to protect the IRQ queue that holds the buffers to be processed by
38 * videobuf2 queue operations
45 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); local
46 struct uvc_video *video = container_of(queue, struct uvc_video, queue);
73 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); local
83 if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
87 if (queue
104 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); local
133 uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2_buf_type type, struct mutex *lock) argument
171 uvcg_free_buffers(struct uvc_video_queue *queue) argument
179 uvcg_alloc_buffers(struct uvc_video_queue *queue, struct v4l2_requestbuffers *rb) argument
189 uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) argument
194 uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) argument
203 uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf, int nonblocking) argument
215 uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file, poll_table *wait) argument
221 uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) argument
232 uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue, unsigned long pgoff) argument
251 uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect) argument
295 uvcg_queue_enable(struct uvc_video_queue *queue, int enable) argument
330 uvcg_complete_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf) argument
349 uvcg_queue_head(struct uvc_video_queue *queue) argument
[all...]
uvc_queue.h
33 struct list_head queue; member in struct:uvc_buffer
48 struct vb2_queue queue; member in struct:uvc_video_queue
61 static inline int uvc_queue_streaming(struct uvc_video_queue *queue) argument
63 return vb2_is_streaming(&queue->queue);
66 int uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2_buf_type type,
69 void uvcg_free_buffers(struct uvc_video_queue *queue);
71 int uvcg_alloc_buffers(struct uvc_video_queue *queue,
74 int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
76 int uvcg_queue_buffer(struct uvc_video_queue *queue, struc
[all...]
/linux-master/drivers/crypto/cavium/zip/
zip_device.c
50 * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
53 * @queue: Queue number
55 * Return: Bytes consumed in the command queue buffer.
57 static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue) argument
59 return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
64 * zip_load_instr - Submits the instruction into the ZIP command queue
69 * This function copies the ZIP instruction to the command queue and rings the
71 * queue is maintained in a circular fashion. When there is space for exactly
72 * one instruction in the queue, nex
81 u32 queue = 0; local
175 zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue) argument
[all...]
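
zip_cmd_queue_consumed above derives the bytes in flight from the software head and tail indices of a circular command queue. A hedged sketch under the assumption that the indices count instruction slots and wrap at a known queue size; the constants are hypothetical, not the driver's:

#include <stdint.h>

#define QUEUE_SLOTS   64u /* hypothetical number of instruction slots */
#define INSTR_BYTES  128u /* hypothetical bytes per ZIP instruction */

/* Bytes consumed between sw_tail (oldest pending) and sw_head (next
 * free slot), with wrap-around folded in by the modulo. */
static inline uint32_t cmd_queue_consumed(uint32_t sw_head, uint32_t sw_tail)
{
	return ((sw_head + QUEUE_SLOTS - sw_tail) % QUEUE_SLOTS) * INSTR_BYTES;
}

/* Keep one slot in reserve so a full queue never makes head == tail,
 * which would be indistinguishable from empty. */
static inline int cmd_queue_has_room(uint32_t sw_head, uint32_t sw_tail)
{
	return cmd_queue_consumed(sw_head, sw_tail) <
	       (QUEUE_SLOTS - 1u) * INSTR_BYTES;
}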
/linux-master/drivers/net/wireless/st/cw1200/
queue.c
3 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
12 #include "queue.h"
27 static inline void __cw1200_queue_lock(struct cw1200_queue *queue) argument
29 struct cw1200_queue_stats *stats = queue->stats;
30 if (queue->tx_locked_cnt++ == 0) {
32 queue->queue_id);
33 ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
37 static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) argument
39 struct cw1200_queue_stats *stats = queue->stats;
40 BUG_ON(!queue
89 __cw1200_queue_gc(struct cw1200_queue *queue, struct list_head *head, bool unlock) argument
135 struct cw1200_queue *queue = local
164 cw1200_queue_init(struct cw1200_queue *queue, struct cw1200_queue_stats *stats, u8 queue_id, size_t capacity, unsigned long ttl) argument
202 cw1200_queue_clear(struct cw1200_queue *queue) argument
244 cw1200_queue_deinit(struct cw1200_queue *queue) argument
256 cw1200_queue_get_num_queued(struct cw1200_queue *queue, u32 link_id_map) argument
280 cw1200_queue_put(struct cw1200_queue *queue, struct sk_buff *skb, struct cw1200_txpriv *txpriv) argument
331 cw1200_queue_get(struct cw1200_queue *queue, u32 link_id_map, struct wsm_tx **tx, struct ieee80211_tx_info **tx_info, const struct cw1200_txpriv **txpriv) argument
372 cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id) argument
414 cw1200_queue_requeue_all(struct cw1200_queue *queue) argument
441 cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id) argument
492 cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, struct sk_buff **skb, const struct cw1200_txpriv **txpriv) argument
522 cw1200_queue_lock(struct cw1200_queue *queue) argument
529 cw1200_queue_unlock(struct cw1200_queue *queue) argument
536 cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue, unsigned long *timestamp, u32 pending_frame_id) argument
[all...]
/linux-master/drivers/iio/buffer/
industrialio-buffer-dma.c
33 * means of two queues. The incoming queue and the outgoing queue. Blocks on the
34 * incoming queue are waiting for the DMA controller to pick them up and fill
35 * them with data. Blocks on the outgoing queue have been filled with data and
51 * incoming or outgoing queue the block will be freed.
100 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
103 iio_buffer_put(&block->queue->buffer);
166 struct iio_dma_buffer_queue *queue, size_t size)
174 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
183 block->queue
165 iio_dma_buffer_alloc_block( struct iio_dma_buffer_queue *queue, size_t size) argument
207 struct iio_dma_buffer_queue *queue = block->queue; local
229 iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, struct list_head *list) argument
273 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); local
349 iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue) argument
372 iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue, struct iio_dma_buffer_block *block) argument
416 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); local
442 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); local
455 iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue, struct iio_dma_buffer_block *block) argument
468 iio_dma_buffer_dequeue( struct iio_dma_buffer_queue *queue) argument
503 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); local
558 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); local
637 iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, struct device *dev, const struct iio_dma_buffer_ops *ops) argument
662 iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) argument
681 iio_dma_buffer_release(struct iio_dma_buffer_queue *queue) argument
[all...]
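
industrialio-buffer-dma.c above describes the block lifecycle: empty blocks wait on the incoming queue for the DMA controller to fill them, then move to the outgoing queue until userspace reads them out. A hedged sketch of that hand-off with a mutex standing in for the kernel's locking and in-order DMA completion assumed (all names hypothetical):

#include <pthread.h>
#include <stddef.h>

struct dma_block {
	struct dma_block *next;
	size_t bytes_used;
};

struct dma_buffer_queues {
	pthread_mutex_t lock;
	struct dma_block *incoming; /* waiting for DMA to fill */
	struct dma_block *outgoing; /* filled, waiting for read() */
};

static void list_push_tail(struct dma_block **list, struct dma_block *b)
{
	b->next = NULL;
	while (*list)
		list = &(*list)->next;
	*list = b;
}

/* DMA completion: the oldest incoming block is done; queue it for
 * userspace. Assumes completions arrive in submission order. */
static void block_done(struct dma_buffer_queues *q, size_t bytes)
{
	pthread_mutex_lock(&q->lock);
	struct dma_block *b = q->incoming;
	if (b) {
		q->incoming = b->next;
		b->bytes_used = bytes;
		list_push_tail(&q->outgoing, b);
	}
	pthread_mutex_unlock(&q->lock);
}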
/linux-master/drivers/net/ethernet/ti/icssg/
icssg_queues.c
2 /* ICSSG Buffer queue helpers
16 int icssg_queue_pop(struct prueth *prueth, u8 queue) argument
20 if (queue >= ICSSG_QUEUES_MAX)
23 regmap_read(prueth->miig_rt, ICSSG_QUEUE_CNT_OFFSET + 4 * queue, &cnt);
27 regmap_read(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, &val);
32 void icssg_queue_push(struct prueth *prueth, int queue, u16 addr) argument
34 if (queue >= ICSSG_QUEUES_MAX)
37 regmap_write(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, addr);
40 u32 icssg_queue_level(struct prueth *prueth, int queue) argument
44 if (queue >
[all...]
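
icssg_queue_pop above services a hardware-managed queue with two register reads: one for the occupancy counter, one for the head entry. A hedged sketch against a hypothetical memory-mapped layout; the 4-bytes-per-queue stride mirrors the snippet, but the base offsets and error codes are assumptions:

#include <errno.h>
#include <stdint.h>

#define QUEUES_MAX        64u
#define QUEUE_CNT_OFFSET 0x000u /* hypothetical occupancy counters */
#define QUEUE_OFFSET     0x100u /* hypothetical head/push registers */

static inline uint32_t reg_read(volatile uint32_t *base, uint32_t off)
{
	return base[off / 4u];
}

/* Pop one buffer address, or an error for a bad or empty queue. */
static int queue_pop(volatile uint32_t *base, unsigned int queue,
		     uint16_t *addr)
{
	if (queue >= QUEUES_MAX)
		return -EINVAL;

	if (!reg_read(base, QUEUE_CNT_OFFSET + 4u * queue))
		return -ENOENT; /* nothing queued */

	*addr = (uint16_t)reg_read(base, QUEUE_OFFSET + 4u * queue);
	return 0;
}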
/linux-master/drivers/net/wireless/broadcom/b43legacy/
pio.c
22 static void tx_start(struct b43legacy_pioqueue *queue) argument
24 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
28 static void tx_octet(struct b43legacy_pioqueue *queue, argument
31 if (queue->need_workarounds) {
32 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
33 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
36 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
38 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
63 static void tx_data(struct b43legacy_pioqueue *queue, argument
71 if (queue
89 tx_complete(struct b43legacy_pioqueue *queue, struct sk_buff *skb) argument
103 generate_cookie(struct b43legacy_pioqueue *queue, struct b43legacy_pio_txpacket *packet) argument
141 struct b43legacy_pioqueue *queue = NULL; local
172 pio_tx_write_fragment(struct b43legacy_pioqueue *queue, struct sk_buff *skb, struct b43legacy_pio_txpacket *packet, size_t txhdr_size) argument
205 struct b43legacy_pioqueue *queue = packet->queue; local
219 struct b43legacy_pioqueue *queue = packet->queue; local
269 struct b43legacy_pioqueue *queue = from_tasklet(queue, t, txtask); local
299 setup_txqueues(struct b43legacy_pioqueue *queue) argument
319 struct b43legacy_pioqueue *queue; local
367 cancel_transfers(struct b43legacy_pioqueue *queue) argument
379 b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue) argument
409 struct b43legacy_pioqueue *queue; local
455 struct b43legacy_pioqueue *queue = dev->pio.queue1; local
477 struct b43legacy_pioqueue *queue; local
535 pio_rx_error(struct b43legacy_pioqueue *queue, int clear_buffers, const char *error) argument
553 b43legacy_pio_rx(struct b43legacy_pioqueue *queue) argument
634 b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) argument
642 b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) argument
[all...]
/linux-master/include/linux/soc/ixp4xx/
qmgr.h
18 #define QUEUE_STAT1_EMPTY 1 /* queue status bits */
34 /* queue interrupt request conditions */
48 u32 statne_h; /* 0x418 - queue nearly empty */
49 u32 statf_h; /* 0x41C - queue full */
57 void qmgr_put_entry(unsigned int queue, u32 val);
58 u32 qmgr_get_entry(unsigned int queue);
59 int qmgr_stat_empty(unsigned int queue);
60 int qmgr_stat_below_low_watermark(unsigned int queue);
61 int qmgr_stat_full(unsigned int queue);
62 int qmgr_stat_overflow(unsigned int queue);
[all...]
/linux-master/drivers/net/xen-netback/
rx.c
42 static void xenvif_update_needed_slots(struct xenvif_queue *queue, argument
55 WRITE_ONCE(queue->rx_slots_needed, needed);
58 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) argument
63 needed = READ_ONCE(queue->rx_slots_needed);
68 prod = queue->rx.sring->req_prod;
69 cons = queue->rx.req_cons;
74 queue->rx.sring->req_event = prod + 1;
80 } while (queue->rx.sring->req_prod != prod);
85 bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) argument
90 spin_lock_irqsave(&queue
111 xenvif_rx_dequeue(struct xenvif_queue *queue) argument
135 xenvif_rx_queue_purge(struct xenvif_queue *queue) argument
143 xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) argument
159 xenvif_rx_copy_flush(struct xenvif_queue *queue) argument
193 xenvif_rx_copy_add(struct xenvif_queue *queue, struct xen_netif_rx_request *req, unsigned int offset, void *data, size_t len) argument
252 xenvif_rx_next_skb(struct xenvif_queue *queue, struct xenvif_pkt_state *pkt) argument
328 xenvif_rx_complete(struct xenvif_queue *queue, struct xenvif_pkt_state *pkt) argument
355 xenvif_rx_next_chunk(struct xenvif_queue *queue, struct xenvif_pkt_state *pkt, unsigned int offset, void **data, size_t *len) argument
393 xenvif_rx_data_slot(struct xenvif_queue *queue, struct xenvif_pkt_state *pkt, struct xen_netif_rx_request *req, struct xen_netif_rx_response *rsp) argument
437 xenvif_rx_extra_slot(struct xenvif_queue *queue, struct xenvif_pkt_state *pkt, struct xen_netif_rx_request *req, struct xen_netif_rx_response *rsp) argument
461 xenvif_rx_skb(struct xenvif_queue *queue) argument
491 xenvif_rx_action(struct xenvif_queue *queue) argument
510 xenvif_rx_queue_slots(const struct xenvif_queue *queue) argument
520 xenvif_rx_queue_stalled(const struct xenvif_queue *queue) argument
530 xenvif_rx_queue_ready(struct xenvif_queue *queue) argument
537 xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread) argument
547 xenvif_rx_queue_timeout(struct xenvif_queue *queue) argument
570 xenvif_wait_for_rx_work(struct xenvif_queue *queue) argument
595 xenvif_queue_carrier_off(struct xenvif_queue *queue) argument
610 xenvif_queue_carrier_on(struct xenvif_queue *queue) argument
628 struct xenvif_queue *queue = data; local
[all...]
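
xenvif_rx_ring_slots_available above is a compact example of the shared-ring idiom: compare the frontend's producer index against our consumer index, and when there is not enough room, write req_event to request an event once the producer passes that point, then re-read the producer to close the race. A hedged standalone sketch with the memory barriers of the real code omitted:

#include <stdbool.h>
#include <stdint.h>

struct shared_ring_sketch {
	volatile uint32_t req_prod;  /* advanced by the frontend */
	volatile uint32_t req_event; /* "notify me once prod passes this" */
};

struct rx_queue_sketch {
	struct shared_ring_sketch *sring;
	uint32_t req_cons; /* backend consumer index */
};

static bool rx_ring_slots_available(struct rx_queue_sketch *q,
				    uint32_t needed)
{
	uint32_t prod;

	do {
		prod = q->sring->req_prod;
		if (prod - q->req_cons >= needed)
			return true;

		/* Not enough requests: arm an event for the next one. */
		q->sring->req_event = prod + 1;

		/* The frontend may have produced between the check and the
		 * rearm; loop again if so, otherwise wait for the event. */
	} while (q->sring->req_prod != prod);

	return false;
}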
/linux-master/block/
blk-mq-virtio.c
12 * blk_mq_virtio_map_queues - provide a default queue mapping for virtio device
13 * @qmap: CPU to hardware queue map.
19 * corresponding to each queue for its affinity mask and builds a queue mapping
20 * that maps a queue to the CPUs that have irq affinity for the corresponding
27 unsigned int queue, cpu; local
32 for (queue = 0; queue < qmap->nr_queues; queue++) {
33 mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
[all...]
blk-mq-pci.c
14 * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
15 * @qmap: CPU to hardware queue map.
21 * corresponding to each queue for its affinity mask and builds a queue mapping
22 * that maps a queue to the CPUs that have irq affinity for the corresponding
29 unsigned int queue, cpu; local
31 for (queue = 0; queue < qmap->nr_queues; queue++) {
32 mask = pci_irq_get_affinity(pdev, queue
[all...]
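
Both map_queues helpers above build the CPU-to-hardware-queue table the same way: for each queue, fetch the affinity mask of its interrupt vector and point every CPU in the mask at that queue. A hedged sketch with a 64-bit bitmask standing in for struct cpumask; the fallback return mirrors the kernel helpers, which drop to a default spread when affinity is unavailable:

#include <stdint.h>

#define MAX_CPUS 64u

/* mq_map[cpu] = hardware queue whose IRQ is affine to that CPU.
 * Returns 0 on success, -1 if any queue lacks affinity information. */
static int map_queues_by_affinity(uint32_t *mq_map, unsigned int nr_queues,
				  const uint64_t *irq_affinity)
{
	for (unsigned int q = 0; q < nr_queues; q++) {
		uint64_t mask = irq_affinity[q]; /* CPUs served by queue q */

		if (!mask)
			return -1; /* caller falls back to a default map */

		for (unsigned int cpu = 0; cpu < MAX_CPUS; cpu++)
			if (mask & (1ull << cpu))
				mq_map[cpu] = q;
	}
	return 0;
}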
/linux-master/drivers/gpu/drm/v3d/
v3d_sysfs.c
16 enum v3d_queue queue; local
20 len += sysfs_emit(buf, "queue\ttimestamp\tjobs\truntime\n");
22 for (queue = 0; queue < V3D_MAX_QUEUES; queue++) {
23 struct v3d_stats *stats = &v3d->queue[queue].stats;
28 /* Each line will display the queue name, timestamp, the number
29 * of jobs sent to that queue and the runtime, as can be seem here:
31 * queue timestam
[all...]
v3d_fence.c
6 struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue) argument
15 fence->queue = queue;
16 fence->seqno = ++v3d->queue[queue].emit_seqno;
18 v3d->queue[queue].fence_context, fence->seqno);
32 switch (f->queue) {
/linux-master/drivers/net/ethernet/stmicro/stmmac/
stmmac_xdp.c
10 struct xsk_buff_pool *pool, u16 queue)
12 struct stmmac_channel *ch = &priv->channel[queue];
17 if (queue >= priv->plat->rx_queues_to_use ||
18 queue >= priv->plat->tx_queues_to_use)
39 stmmac_disable_rx_queue(priv, queue);
40 stmmac_disable_tx_queue(priv, queue);
43 set_bit(queue, priv->af_xdp_zc_qps);
46 stmmac_enable_rx_queue(priv, queue);
47 stmmac_enable_tx_queue(priv, queue);
50 err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_R
9 stmmac_xdp_enable_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, u16 queue) argument
58 stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) argument
95 stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, u16 queue) argument
[all...]
/linux-master/drivers/misc/genwqe/
card_ddcb.c
14 * Device Driver Control Block (DDCB) queue support. Definition of
15 * interrupt handlers for queue support as well as triggering the
40 * Situation (1): Empty queue
82 static int queue_empty(struct ddcb_queue *queue) argument
84 return queue->ddcb_next == queue->ddcb_act;
87 static int queue_enqueued_ddcbs(struct ddcb_queue *queue) argument
89 if (queue->ddcb_next >= queue->ddcb_act)
90 return queue
95 queue_free_ddcbs(struct ddcb_queue *queue) argument
163 print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) argument
265 enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, struct ddcb *pddcb, int ddcb_no) argument
332 struct ddcb_queue *queue = req->queue; local
365 genwqe_check_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) argument
481 struct ddcb_queue *queue; local
506 struct ddcb_queue *queue = req->queue; local
567 get_next_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, int *num) argument
625 struct ddcb_queue *queue = req->queue; local
759 struct ddcb_queue *queue; local
981 struct ddcb_queue *queue = &cd->queue; local
1012 struct ddcb_queue *queue = &cd->queue; local
1021 setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) argument
1098 ddcb_queue_initialized(struct ddcb_queue *queue) argument
1103 free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) argument
1231 struct ddcb_queue *queue; local
1320 struct ddcb_queue *queue = &cd->queue; local
1347 struct ddcb_queue *queue = &cd->queue; local
[all...]
/linux-master/drivers/nvme/target/
tcp.c
66 * queue before determining it to be idle. This optional module behavior
110 struct nvmet_tcp_queue *queue; member in struct:nvmet_tcp_cmd
221 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, argument
224 if (unlikely(!queue->nr_cmds)) {
229 return cmd - queue->cmds;
257 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) argument
261 cmd = list_first_entry_or_null(&queue->free_list,
277 if (unlikely(cmd == &cmd->queue->connect))
280 list_add_tail(&cmd->entry, &cmd->queue->free_list);
283 static inline int queue_cpu(struct nvmet_tcp_queue *queue) argument
288 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue) argument
293 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) argument
308 nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, void *pdu, size_t len) argument
334 nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) argument
390 nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) argument
399 nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) argument
456 struct nvmet_tcp_queue *queue = cmd->queue; local
489 struct nvmet_tcp_queue *queue = cmd->queue; local
514 struct nvmet_tcp_queue *queue = cmd->queue; local
531 nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue) argument
543 nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue) argument
573 struct nvmet_tcp_queue *queue = cmd->queue; local
633 struct nvmet_tcp_queue *queue = cmd->queue; local
745 struct nvmet_tcp_queue *queue = cmd->queue; local
778 nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, bool last_in_batch) argument
827 nvmet_tcp_try_send(struct nvmet_tcp_queue *queue, int budget, int *sends) argument
846 nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) argument
854 nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) argument
863 nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue) argument
890 nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) argument
950 nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) argument
981 nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) argument
1030 nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) argument
1141 nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue, struct msghdr *msg, char *cbuf) argument
1176 nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) argument
1241 struct nvmet_tcp_queue *queue = cmd->queue; local
1249 nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) argument
1282 nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) argument
1333 nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) argument
1367 nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, int budget, int *recvs) argument
1388 struct nvmet_tcp_queue *queue = local
1395 nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) argument
1409 nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue) argument
1414 nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue, int ops) argument
1428 struct nvmet_tcp_queue *queue = local
1458 nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *c) argument
1514 nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) argument
1540 nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) argument
1552 nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) argument
1564 nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) argument
1580 nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue) argument
1592 struct nvmet_tcp_queue *queue = local
1621 struct nvmet_tcp_queue *queue; local
1639 struct nvmet_tcp_queue *queue; local
1661 struct nvmet_tcp_queue *queue; local
1686 nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) argument
1742 nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue) argument
1793 struct nvmet_tcp_queue *queue = data; local
1819 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w), local
1839 nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue) argument
1875 struct nvmet_tcp_queue *queue; local
2088 struct nvmet_tcp_queue *queue; local
2118 struct nvmet_tcp_queue *queue; local
2129 struct nvmet_tcp_queue *queue = local
2162 struct nvmet_tcp_queue *queue = cmd->queue; local
2203 struct nvmet_tcp_queue *queue; local
[all...]
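
nvmet_tcp_cmd_tag above recovers a command's tag by pointer arithmetic: each queue owns one contiguous array of commands, so the tag is simply the element's index within it. A minimal sketch of the idiom with hypothetical types:

#include <stdint.h>

struct tcp_cmd_sketch {
	int state; /* per-command bookkeeping elided */
};

struct tcp_queue_sketch {
	struct tcp_cmd_sketch *cmds; /* array of nr_cmds commands */
	uint32_t nr_cmds;
};

/* Valid only for commands allocated out of queue->cmds, which the
 * real code guards with the nr_cmds check visible in the snippet. */
static inline uint16_t cmd_tag(const struct tcp_queue_sketch *queue,
			       const struct tcp_cmd_sketch *cmd)
{
	return (uint16_t)(cmd - queue->cmds);
}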
