Searched refs:queue (Results 201 - 225 of 1368) sorted by last modified time

/linux-master/sound/core/seq/
seq_timer.c
246 sprintf(str, "sequencer queue %i", q->queue);
257 err = snd_timer_open(t, &tmr->alsa_id, q->queue);
267 err = snd_timer_open(t, &tid, q->queue);
419 /* TODO: use interpolation on tick queue (will only be useful for very
450 snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
seq_ports.c
482 return r->queue == s->queue;
/linux-master/sound/core/seq/oss/
seq_oss_midi.c
368 subs.queue = dp->queue; /* queue for timestamps */
473 ev.queue = dp->queue;
607 * decode event and send MIDI bytes to read queue
seq_oss_init.c
28 MODULE_PARM_DESC(maxqlen, "maximum queue length");
45 static int delete_seq_queue(int queue);
177 dp->queue = -1;
208 /* allocate queue */
216 /*dp->addr.queue = dp->queue;*/
224 /* initialize read queue */
233 /* initialize write queue */
267 delete_seq_queue(dp->queue);
339 * allocate a queue
362 delete_seq_queue(int queue) argument
402 int queue; local
[all...]
seq_oss_device.h
75 int queue; /* sequencer queue number */ member in struct:seq_oss_devinfo
90 /* output queue */
93 /* midi input queue */
158 ev->queue = dp->queue;
/linux-master/kernel/
signal.c
400 * allocate a new signal queue record
454 void flush_sigqueue(struct sigpending *queue) argument
458 sigemptyset(&queue->signal);
459 while (!list_empty(&queue->list)) {
460 q = list_entry(queue->list.next, struct sigqueue, list);
597 * Ok, it wasn't in the queue. This must be
599 * out of queue space. So zero out the info.
714 * Might a synchronous signal be in the queue?
720 * Return the first synchronous signal in the queue.
778 * Remove signals in mask from the pending set and queue
[all...]
/linux-master/include/linux/spi/
spi.h
431 * @transfer: adds a message to the controller's transfer queue.
437 * @queued: whether this controller is providing an internal message queue
440 * @queue_lock: spinlock to synchronise access to message queue
441 * @queue: message queue
459 * @rt: whether this queue is set to run as a realtime task
464 * @prepare_transfer_hardware: a message will soon arrive from the queue
473 * queue so the subsystem notifies the driver that it may relax the
532 * @queue_empty: signal green light for opportunistically skipping the queue
543 * a queue o
701 struct list_head queue; member in struct:spi_controller
1183 struct list_head queue; member in struct:spi_message
[all...]
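
The spi.h kerneldoc above describes a queued controller: each message carries its own list node (struct spi_message's queue member, line 1183) and is chained onto the controller's queue (line 701) under queue_lock. A minimal sketch of that pattern, assuming only the fields named above; the helper name is hypothetical, not the kernel's implementation:

#include <linux/spi/spi.h>

/* Hypothetical helper showing the queueing pattern the kerneldoc
 * describes: append a message to the controller's message queue
 * while holding @queue_lock. */
static void example_spi_queue_message(struct spi_controller *ctlr,
				      struct spi_message *msg)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	list_add_tail(&msg->queue, &ctlr->queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
}
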
/linux-master/include/linux/
iommu.h
137 * struct iopf_queue - IO Page Fault queue
139 * @devices: devices attached to this queue
166 #define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
249 * usefully support the non-strict DMA flush queue.
614 * queue
685 * @queue: IOPF queue
686 * @queue_list: index into queue->devices
697 struct iopf_queue *queue; member in struct:iommu_fault_param
1569 int iopf_queue_add_device(struct iopf_queue *queue, struc
1581 iopf_queue_add_device(struct iopf_queue *queue, struct device *dev) argument
1587 iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) argument
1601 iopf_queue_free(struct iopf_queue *queue) argument
1605 iopf_queue_discard_partial(struct iopf_queue *queue) argument
[all...]
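
The iommu.h hits outline the IO page fault (IOPF) queue API: devices are attached to and removed from a queue, partially received fault groups can be discarded, and the queue is freed at teardown. A hedged lifecycle sketch using only those calls; iopf_queue_alloc() is an assumption, since the allocator does not appear in the hits:

#include <linux/iommu.h>

/* Lifecycle sketch for the IOPF queue declarations above;
 * iopf_queue_alloc() is assumed to pair with iopf_queue_free(). */
static int example_enable_iopf(struct device *dev)
{
	struct iopf_queue *queue;
	int ret;

	queue = iopf_queue_alloc(dev_name(dev));
	if (!queue)
		return -ENOMEM;

	ret = iopf_queue_add_device(queue, dev);
	if (ret) {
		iopf_queue_free(queue);
		return ret;
	}

	/* ... page faults for dev are now delivered via the queue ... */

	iopf_queue_remove_device(queue, dev);
	iopf_queue_free(queue);
	return 0;
}
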
/linux-master/drivers/usb/gadget/udc/
max3420_udc.c
151 struct list_head queue; member in struct:max3420_req
158 struct list_head queue; member in struct:max3420_ep
691 if (list_empty(&ep->queue))
694 req = list_first_entry(&ep->queue, struct max3420_req, queue);
732 list_del_init(&req->queue);
962 list_for_each_entry_safe(req, r, &ep->queue, queue) {
963 list_del_init(&req->queue);
1036 list_add_tail(&req->queue,
[all...]
/linux-master/drivers/perf/
xgene_pmu.c
350 XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
351 XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
560 XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh, 0x22),
561 XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh, 0x23),
562 XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh, 0x24),
fsl_imx8_ddr_perf.c
256 IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
257 IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
/linux-master/drivers/nvdimm/
pmem.c
215 do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
508 q = disk->queue;
/linux-master/drivers/mmc/host/
dw_mmc.c
1391 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1411 list_add_tail(&slot->queue_node, &host->queue);
1900 if (!list_empty(&host->queue)) {
1901 slot = list_entry(host->queue.next,
3353 INIT_LIST_HEAD(&host->queue);
/linux-master/drivers/mmc/core/
queue.c
17 #include "queue.h"
140 struct request_queue *q = mq->queue;
382 mq->queue = disk->queue;
385 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
386 blk_queue_rq_timeout(mq->queue, 60 * HZ);
388 blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
389 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
391 dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
400 mmc_crypto_setup_queue(mq->queue, hos
[all...]
/linux-master/drivers/media/usb/msi2500/
msi2500.c
112 /* videobuf2 queue and queued buffers list */
1194 /* Init videobuf2 queue structure */
1204 dev_err(dev->dev, "Could not initialize vb2 queue\n");
1210 dev->vdev.queue = &dev->vb_queue;
1211 dev->vdev.queue->lock = &dev->vb_queue_lock;
/linux-master/drivers/md/dm-vdo/
vdo.c
334 * get_thread_name() - Format the name of the worker thread desired to support a given work queue.
351 * known as the "request queue."
406 * @type: The description of the work queue for this thread.
408 * @contexts: An array of queue_count contexts, one for each individual queue; may be NULL.
411 * config, and completions can be enqueued to the queue and run on the threads comprising this
426 if (thread->queue != NULL) {
427 return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type),
436 type, queue_count, contexts, &thread->queue);
614 *reason = "bio ack queue initialization failed";
623 *reason = "CPU queue initializatio
1608 struct vdo_work_queue *queue = vdo_get_current_work_queue(); local
[all...]
vdo.h
59 struct vdo_work_queue *queue; member in struct:vdo_thread
266 * vdo_uses_bio_ack_queue() - Indicate whether the vdo is configured to use a separate work queue
273 * Return: Whether a bio-acknowledgement work queue is in use.
slab-depot.c
36 #include "wait-queue.h"
243 * notify_summary_waiters() - Wake all the waiters in a given queue.
244 * @allocator: The block allocator summary which owns the queue.
245 * @queue: The queue to notify.
248 struct vdo_wait_queue *queue)
253 vdo_waitq_notify_all_waiters(queue, NULL, &result);
1092 /* Re-queue the block if it was re-dirtied while it was writing. */
1263 * dirty_block() - Mark a reference count block as dirty, potentially adding it to the dirty queue
1763 * add_entries() - Add as many entries as possible from the queue o
247 notify_summary_waiters(struct block_allocator *allocator, struct vdo_wait_queue *queue) argument
[all...]
priority-table.c
25 * The head of a queue of table entries, all having the same priority
27 struct list_head queue; member in struct:bucket
34 * of the queue in the appropriate bucket. The dequeue operation finds the highest-priority
72 INIT_LIST_HEAD(&bucket->queue);
116 list_del_init(&table->buckets[priority].queue);
120 * vdo_priority_table_enqueue() - Add a new entry to the priority table, appending it to the queue
133 /* Append the entry to the queue in the specified bucket. */
134 list_move_tail(entry, &table->buckets[priority].queue);
174 entry = bucket->queue.next;
178 if (list_empty(&bucket->queue))
[all...]
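
The priority-table.c fragments are enough to reconstruct the structure: one bucket per priority, each bucket a list head; enqueue appends to the tail of the bucket's queue (line 134), dequeue takes the head of the highest-priority non-empty bucket (line 174) and unlinks it with list_del_init. A condensed sketch under those assumptions; the real table's search-mask bookkeeping for fast bucket lookup is replaced here by a linear scan:

#include <linux/list.h>

struct example_bucket {
	struct list_head queue;	/* entries sharing this priority */
};

struct example_priority_table {
	unsigned int max_priority;
	struct example_bucket buckets[];	/* one per priority level */
};

static void example_enqueue(struct example_priority_table *table,
			    unsigned int priority, struct list_head *entry)
{
	/* Append the entry to the queue in the matching bucket. */
	list_move_tail(entry, &table->buckets[priority].queue);
}

static struct list_head *example_dequeue(struct example_priority_table *table)
{
	unsigned int priority;

	for (priority = table->max_priority + 1; priority-- > 0; ) {
		struct example_bucket *bucket = &table->buckets[priority];

		if (!list_empty(&bucket->queue)) {
			struct list_head *entry = bucket->queue.next;

			list_del_init(entry);
			return entry;
		}
	}
	return NULL;	/* table is empty */
}
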
io-submitter.c
23 * queue thread (or more than one) to prevent blocking in other threads if the storage device has a
24 * full queue. The plug structure allows that thread to do better batching of requests to make the
32 * reorder them to try to encourage I/O request merging in the request queue underneath.
35 struct vdo_work_queue *queue; member in struct:bio_queue_data
331 * The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block
336 * no error can occur on the bio queue. Currently this is true for all callers, but additional care
391 /* Setup for each bio-submission work queue */
408 * Clean up the partially initialized bio-queue entirely and indicate that
422 * Clean up the partially initialized bio-queue entirely and indicate that
426 vdo_log_error("bio queue initializatio
[all...]
funnel-queue.h
13 * A funnel queue is a simple (almost) lock-free queue that accepts entries from multiple threads
19 * mechanism to ensure that only one thread is consuming from the queue. If more than one thread
20 * attempts to consume from the queue, the resulting behavior is undefined. Clients must not
21 * directly access or manipulate the internals of the queue, which are only exposed for the purpose
25 * the queue entries, and pointers to those structures are used exclusively by the queue. No macros
26 * are defined to template the queue, so the offset of the funnel_queue_entry in the records placed
27 * in the queue must all be the same so the client can derive their structure pointer from the
31 * soon as they are returned since this queue i
82 vdo_funnel_queue_put(struct funnel_queue *queue, struct funnel_queue_entry *entry) argument
[all...]
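
The funnel-queue.h comment states the full contract: any number of producers, exactly one consumer, no templating macros, so every client embeds a struct funnel_queue_entry at the same offset and recovers its record by pointer arithmetic. A sketch of a client under those assumptions, using the put/poll pair visible in the hits (vdo_funnel_queue_put above, vdo_funnel_queue_poll in funnel-requestqueue.c below):

#include <linux/container_of.h>
#include "funnel-queue.h"

/* Client record embedding the queue entry, per the comment above. */
struct example_record {
	struct funnel_queue_entry entry;
	int payload;
};

/* Producer side: safe from any thread. */
static void example_produce(struct funnel_queue *queue,
			    struct example_record *record)
{
	vdo_funnel_queue_put(queue, &record->entry);
}

/* Consumer side: exactly one thread may poll; concurrent consumers
 * are undefined behavior, per the comment above. The record pointer
 * is derived from the embedded entry. */
static struct example_record *example_consume(struct funnel_queue *queue)
{
	struct funnel_queue_entry *entry = vdo_funnel_queue_poll(queue);

	return entry ? container_of(entry, struct example_record, entry) : NULL;
}
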
funnel-workqueue.c
15 #include "funnel-queue.h"
28 * DOC: Work queue definition.
36 /* Name of just the work queue (e.g., "cpuQ12") */
73 static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue) argument
75 return ((queue == NULL) ?
76 NULL : container_of(queue, struct simple_work_queue, common));
79 static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue) argument
81 return ((queue == NULL) ?
83 container_of(queue, struct round_robin_work_queue, common));
96 static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue) argument
110 enqueue_work_queue_completion(struct simple_work_queue *queue, struct vdo_completion *completion) argument
153 run_start_hook(struct simple_work_queue *queue) argument
159 run_finish_hook(struct simple_work_queue *queue) argument
174 wait_for_next_completion(struct simple_work_queue *queue) argument
222 process_completion(struct simple_work_queue *queue, struct vdo_completion *completion) argument
233 service_work_queue(struct simple_work_queue *queue) argument
264 struct simple_work_queue *queue = ptr; local
273 free_simple_work_queue(struct simple_work_queue *queue) argument
283 free_round_robin_work_queue(struct round_robin_work_queue *queue) argument
298 vdo_free_work_queue(struct vdo_work_queue *queue) argument
317 struct simple_work_queue *queue; local
386 struct round_robin_work_queue *queue; local
444 finish_simple_work_queue(struct simple_work_queue *queue) argument
454 finish_round_robin_work_queue(struct round_robin_work_queue *queue) argument
465 vdo_finish_work_queue(struct vdo_work_queue *queue) argument
478 dump_simple_work_queue(struct simple_work_queue *queue) argument
499 vdo_dump_work_queue(struct vdo_work_queue *queue) argument
556 vdo_enqueue_work_queue(struct vdo_work_queue *queue, struct vdo_completion *completion) argument
612 struct simple_work_queue *queue = get_current_thread_work_queue(); local
617 vdo_get_work_queue_owner(struct vdo_work_queue *queue) argument
629 struct simple_work_queue *queue = get_current_thread_work_queue(); local
634 vdo_work_queue_type_is(struct vdo_work_queue *queue, const struct vdo_work_queue_type *type) argument
[all...]
/linux-master/drivers/md/dm-vdo/indexer/
index.c
48 * If a sparse cache has only one zone, it will not create a triage queue, but it still needs the
167 /* This is the request processing function for the triage queue. */
663 /* The triage queue is only needed for sparse multi-zone indexes. */
1362 struct uds_request_queue *queue; local
1367 queue = index->triage_queue;
1379 queue = index->zone_queues[request->zone_number];
1387 uds_request_queue_enqueue(queue, request);
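
The index.c fragments (lines 1362-1387) show the dispatch in full: a request is routed to the triage queue when one exists, which is only the case for sparse multi-zone indexes, and otherwise to its zone's queue, then handed off with uds_request_queue_enqueue(). A condensed sketch; request_needs_triage() is an assumed stand-in for the real routing predicate:

/* Condensed from the fragments above; request_needs_triage() is
 * hypothetical, standing in for the real selection test. */
static void example_dispatch(struct uds_index *index,
			     struct uds_request *request)
{
	struct uds_request_queue *queue;

	if ((index->triage_queue != NULL) && request_needs_triage(request))
		queue = index->triage_queue;
	else
		queue = index->zone_queues[request->zone_number];

	uds_request_queue_enqueue(queue, request);
}
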
funnel-requestqueue.c
12 #include "funnel-queue.h"
18 * This queue will attempt to handle requests in reasonably sized batches instead of reacting
22 * If the wait time becomes long enough, the queue will become dormant and must be explicitly
24 * queue via xchg (which is a memory barrier), and later checks "dormant" to decide whether to do a
28 * decide if the funnel queue is idle. In dormant mode, the last examination of "newest" before
31 * queue's "next" field update isn't visible yet to make the entry accessible, its existence will
35 * the queue to awaken immediately.
50 /* Wait queue for synchronizing producers and consumer */
68 static inline struct uds_request *poll_queues(struct uds_request_queue *queue) argument
72 entry = vdo_funnel_queue_poll(queue
83 are_queues_idle(struct uds_request_queue *queue) argument
94 dequeue_request(struct uds_request_queue *queue, struct uds_request **request_ptr, bool *waited_ptr) argument
115 wait_for_request(struct uds_request_queue *queue, bool dormant, unsigned long timeout, struct uds_request **request, bool *waited) argument
133 struct uds_request_queue *queue = arg; local
199 struct uds_request_queue *queue; local
234 wake_up_worker(struct uds_request_queue *queue) argument
240 uds_request_queue_enqueue(struct uds_request_queue *queue, struct uds_request *request) argument
257 uds_request_queue_finish(struct uds_request_queue *queue) argument
[all...]
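
The funnel-requestqueue.c comment pins down the wakeup protocol: the producer's insertion ends in an xchg, a full memory barrier, after which it checks "dormant" and only then pays for an explicit wakeup. A producer-side sketch of that ordering; the struct layout and field names here are hypothetical, only the protocol is from the source:

#include <linux/atomic.h>
#include <linux/wait.h>
#include "funnel-queue.h"

/* Hypothetical layout; only the ordering protocol is from the source. */
struct example_request_queue {
	struct funnel_queue *main_queue;
	atomic_t dormant;
	wait_queue_head_t wait_head;
};

static void example_enqueue(struct example_request_queue *queue,
			    struct funnel_queue_entry *entry)
{
	/* vdo_funnel_queue_put() inserts via xchg, a full barrier, so
	 * the dormant check below cannot be reordered before the entry
	 * becomes visible to the consumer. */
	vdo_funnel_queue_put(queue->main_queue, entry);

	if (atomic_read(&queue->dormant))
		wake_up(&queue->wait_head);	/* explicit wakeup, per the comment */
}
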
funnel-requestqueue.h
12 * A simple request queue which will handle new requests in the order in which they are received,
26 void uds_request_queue_enqueue(struct uds_request_queue *queue,
29 void uds_request_queue_finish(struct uds_request_queue *queue);
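
The header gives the public surface: FIFO handling of requests, an enqueue, and a finish that tears the queue down. A minimal lifecycle sketch; uds_make_request_queue() and the worker callback shape are assumptions, since only enqueue and finish appear in the hits:

#include "funnel-requestqueue.h"

/* Assumed worker callback shape; the real typedef is not in the hits. */
static void example_process(struct uds_request *request)
{
	/* ... handle one request, in arrival order ... */
}

static int example_lifecycle(struct uds_request *request)
{
	struct uds_request_queue *queue;
	int result;

	/* uds_make_request_queue() is an assumed constructor. */
	result = uds_make_request_queue("example", example_process, &queue);
	if (result != 0)
		return result;

	uds_request_queue_enqueue(queue, request);
	/* ... */
	uds_request_queue_finish(queue);	/* drain and tear down */
	return 0;
}
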

Completed in 491 milliseconds
