Lines matching refs: queue

33 * means of two queues: the incoming queue and the outgoing queue. Blocks on the
34 * incoming queue are waiting for the DMA controller to pick them up and fill
35 * them with data. Blocks on the outgoing queue have been filled with data and
51 * incoming or outgoing queue the block will be freed.
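The two queues define the contract a DMA driver implements through its callback operations: submit() is handed blocks taken from the incoming queue and must start a transfer for them, while abort() is called on disable and must return any in-flight blocks. For reference, the callback structure in include/linux/iio/buffer-dma.h looks essentially like the sketch below (a paraphrase of the header, not a verbatim copy):

struct iio_dma_buffer_ops {
	/* Start a DMA transfer for the given block. */
	int (*submit)(struct iio_dma_buffer_queue *queue,
		      struct iio_dma_buffer_block *block);
	/* Cancel all pending transfers and hand their blocks back. */
	void (*abort)(struct iio_dma_buffer_queue *queue);
};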
100 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
103 iio_buffer_put(&block->queue->buffer);
166 struct iio_dma_buffer_queue *queue, size_t size)
174 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
183 block->queue = queue;
187 iio_buffer_get(&queue->buffer);
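Each block is backed by a coherent DMA allocation rounded up to a page multiple, and it takes a reference on the owning iio_buffer so the queue cannot go away while blocks are still alive. A minimal sketch of the allocation pattern itself, using the generic DMA API (the function names below are placeholders, not the buffer core's internal helpers):

#include <linux/dma-mapping.h>

/* Sketch only: allocate and free one coherent block for device "dev". */
static void *example_alloc_block(struct device *dev, size_t size,
				 dma_addr_t *phys)
{
	/* Round up to a page multiple, as the buffer core does. */
	return dma_alloc_coherent(dev, PAGE_ALIGN(size), phys, GFP_KERNEL);
}

static void example_free_block(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t phys)
{
	dma_free_coherent(dev, PAGE_ALIGN(size), vaddr, phys);
}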
198 static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
202 if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
207 wake_up_interruptible_poll(&queue->buffer.pollq, flags);
215 * pass back ownership of the block to the queue.
219 struct iio_dma_buffer_queue *queue = block->queue;
222 spin_lock_irqsave(&queue->list_lock, flags);
224 spin_unlock_irqrestore(&queue->list_lock, flags);
227 iio_dma_buffer_queue_wake(queue);
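When the hardware finishes with a block, the driver reports it with iio_dma_buffer_block_done(), typically straight from its DMA completion callback; the core then moves the block to the appropriate list and wakes any poller. A hedged sketch modelled loosely on the dmaengine glue (the residue handling is an assumption about such a driver, not part of the core API):

static void example_dma_done(void *data, const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;

	/* Account for a partially completed transfer, if any. */
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}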
234 * @queue: Queue for which to complete blocks.
235 * @list: List of aborted blocks. All blocks in this list must be from @queue.
239 * hand the blocks back to the queue.
241 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
247 spin_lock_irqsave(&queue->list_lock, flags);
254 spin_unlock_irqrestore(&queue->list_lock, flags);
256 iio_dma_buffer_queue_wake(queue);
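iio_dma_buffer_block_list_abort() is aimed at exactly the driver's abort() path: stop the hardware first, then hand every block the driver still owns back to the core in one call. A hedged sketch, assuming a hypothetical driver-private container (example_state, its dma_chan and its active list are placeholders, not core API):

/* Hypothetical driver state wrapping the DMA buffer queue. */
struct example_state {
	struct iio_dma_buffer_queue queue;
	struct dma_chan *chan;
	struct list_head active;	/* blocks currently owned by the DMA engine */
};

static struct example_state *example_queue_to_state(struct iio_dma_buffer_queue *queue)
{
	return container_of(queue, struct example_state, queue);
}

static void example_abort(struct iio_dma_buffer_queue *queue)
{
	struct example_state *st = example_queue_to_state(queue);

	/* Cancel anything still in flight before reclaiming the blocks. */
	dmaengine_terminate_sync(st->chan);
	iio_dma_buffer_block_list_abort(queue, &st->active);
}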
285 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
297 size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
298 queue->buffer.length, 2);
300 mutex_lock(&queue->lock);
303 if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
306 queue->fileio.block_size = size;
307 queue->fileio.active_block = NULL;
309 spin_lock_irq(&queue->list_lock);
310 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
311 block = queue->fileio.blocks[i];
323 spin_unlock_irq(&queue->list_lock);
325 INIT_LIST_HEAD(&queue->incoming);
327 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
328 if (queue->fileio.blocks[i]) {
329 block = queue->fileio.blocks[i];
342 block = iio_dma_buffer_alloc_block(queue, size);
347 queue->fileio.blocks[i] = block;
362 if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
364 list_add_tail(&block->head, &queue->incoming);
371 mutex_unlock(&queue->lock);
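As a worked example (values chosen purely for illustration): with bytes_per_datum = 8 and a buffer length of 1024 samples, the fileio block size comes out to DIV_ROUND_UP(8 * 1024, 2) = 4096 bytes, i.e. each block covers half of the requested buffer. Since PAGE_ALIGN(4096) is still 4096 on a 4 KiB-page system, a later request_update with the same parameters hits the early-out above and reuses the existing blocks instead of reallocating them.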
377 static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
381 spin_lock_irq(&queue->list_lock);
382 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
383 if (!queue->fileio.blocks[i])
385 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
387 spin_unlock_irq(&queue->list_lock);
389 INIT_LIST_HEAD(&queue->incoming);
391 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
392 if (!queue->fileio.blocks[i])
394 iio_buffer_block_put(queue->fileio.blocks[i]);
395 queue->fileio.blocks[i] = NULL;
397 queue->fileio.active_block = NULL;
400 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
410 if (!queue->ops)
415 ret = queue->ops->submit(queue, block);
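queue->ops->submit() is where the driver actually programs the transfer for one block. A hedged sketch of such a callback built on the dmaengine API, reusing the hypothetical example_state container and example_dma_done() callback from the sketches above (locking of the active list is omitted for brevity):

static int example_submit(struct iio_dma_buffer_queue *queue,
			  struct iio_dma_buffer_block *block)
{
	struct example_state *st = example_queue_to_state(queue);
	struct dma_async_tx_descriptor *desc;

	/* Assume the whole block fits in one transfer (capture direction). */
	block->bytes_used = block->size;
	desc = dmaengine_prep_slave_single(st->chan, block->phys_addr,
					   block->bytes_used, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = example_dma_done;
	desc->callback_param = block;

	list_add_tail(&block->head, &st->active);
	dmaengine_submit(desc);		/* cookie/error checking omitted */
	dma_async_issue_pending(st->chan);

	return 0;
}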
444 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
447 mutex_lock(&queue->lock);
448 queue->active = true;
449 list_for_each_entry_safe(block, _block, &queue->incoming, head) {
451 iio_dma_buffer_submit_block(queue, block);
453 mutex_unlock(&queue->lock);
470 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
472 mutex_lock(&queue->lock);
473 queue->active = false;
475 if (queue->ops && queue->ops->abort)
476 queue->ops->abort(queue);
477 mutex_unlock(&queue->lock);
483 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
488 } else if (queue->active) {
489 iio_dma_buffer_submit_block(queue, block);
492 list_add_tail(&block->head, &queue->incoming);
497 struct iio_dma_buffer_queue *queue)
502 spin_lock_irq(&queue->list_lock);
504 idx = queue->fileio.next_dequeue;
505 block = queue->fileio.blocks[idx];
508 idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
509 queue->fileio.next_dequeue = idx;
514 spin_unlock_irq(&queue->list_lock);
522 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
530 mutex_lock(&queue->lock);
532 if (!queue->fileio.active_block) {
533 block = iio_dma_buffer_dequeue(queue);
538 queue->fileio.pos = 0;
539 queue->fileio.active_block = block;
541 block = queue->fileio.active_block;
545 if (n > block->bytes_used - queue->fileio.pos)
546 n = block->bytes_used - queue->fileio.pos;
547 addr = block->vaddr + queue->fileio.pos;
558 queue->fileio.pos += n;
560 if (queue->fileio.pos == block->bytes_used) {
561 queue->fileio.active_block = NULL;
562 iio_dma_buffer_enqueue(queue, block);
568 mutex_unlock(&queue->lock);
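Seen from userspace, this fileio path is simply a blocking read() on the IIO character device: each read is served from the currently active block and, once a block is fully consumed, it is recycled onto the incoming queue behind the scenes. A minimal userspace sketch (the device node is a placeholder, and the buffer must already be configured and enabled via sysfs):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/iio:device0", O_RDONLY);
	uint8_t buf[4096];
	ssize_t n;

	if (fd < 0)
		return 1;

	/* Each read() drains data from the active DMA block. */
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		/* ... process n bytes of sample data ... */
	}

	close(fd);
	return 0;
}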
616 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
628 mutex_lock(&queue->lock);
629 if (queue->fileio.active_block)
630 data_available += queue->fileio.active_block->size;
632 spin_lock_irq(&queue->list_lock);
634 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
635 block = queue->fileio.blocks[i];
637 if (block != queue->fileio.active_block
642 spin_unlock_irq(&queue->list_lock);
643 mutex_unlock(&queue->lock);
686 * iio_dma_buffer_init() - Initialize DMA buffer queue
687 * @queue: Buffer to initialize
689 * @ops: DMA buffer queue callback operations
691 * The DMA device will be used by the queue to do DMA memory allocations. So it
695 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
698 iio_buffer_init(&queue->buffer);
699 queue->buffer.length = PAGE_SIZE;
700 queue->buffer.watermark = queue->buffer.length / 2;
701 queue->dev = dev;
702 queue->ops = ops;
704 INIT_LIST_HEAD(&queue->incoming);
706 mutex_init(&queue->lock);
707 spin_lock_init(&queue->list_lock);
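A buffer implementation built on this core embeds the queue in its own state and wires it up when the buffer is created. A hedged sketch, again using the hypothetical example_state container and callbacks from above (error handling and the rest of the iio_buffer setup are omitted):

static const struct iio_dma_buffer_ops example_dma_buffer_ops = {
	.submit = example_submit,
	.abort = example_abort,
};

static struct example_state *example_buffer_alloc(struct device *dev)
{
	struct example_state *st;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return NULL;

	INIT_LIST_HEAD(&st->active);
	iio_dma_buffer_init(&st->queue, dev, &example_dma_buffer_ops);

	return st;
}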
714 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
715 * @queue: Buffer to cleanup
720 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
722 mutex_lock(&queue->lock);
724 iio_dma_buffer_fileio_free(queue);
725 queue->ops = NULL;
727 mutex_unlock(&queue->lock);
733 * @queue: Buffer to release
739 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
741 mutex_destroy(&queue->lock);
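Teardown is split in two steps: iio_dma_buffer_exit() detaches the queue from its ops and frees the fileio blocks while the DMA device is still available, and iio_dma_buffer_release() destroys what is left once the last reference to the buffer is dropped. A hedged sketch of the release side, matching the hypothetical example_state layout used above:

static void example_buffer_release(struct iio_buffer *buffer)
{
	struct example_state *st =
		container_of(buffer, struct example_state, queue.buffer);

	iio_dma_buffer_release(&st->queue);
	kfree(st);
}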