Lines Matching refs:queue

33  * means of two queues: the incoming queue and the outgoing queue. Blocks on the
34 * incoming queue are waiting for the DMA controller to pick them up and fill
35 * them with data. Blocks on the outgoing queue have been filled with data and
51 * incoming or outgoing queue, the block will be freed.
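
To make the two-queue lifecycle concrete, here is a minimal, hypothetical sketch of the producer side, assuming a dmaengine-backed driver. The foo_* names (including the foo_queue_to_chan() helper) are invented for illustration, and the completion callback foo_dma_done() is shown further down, after the iio_dma_buffer_block_done() lines.

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/iio/buffer-dma.h>

static void foo_dma_done(void *data);	/* defined in the sketch further below */

/*
 * ->submit() is called by the core for each block taken off the incoming
 * queue; the driver maps the block to a single DMA transfer that fills it.
 */
static int foo_submit_block(struct iio_dma_buffer_queue *queue,
			    struct iio_dma_buffer_block *block)
{
	struct dma_chan *chan = foo_queue_to_chan(queue);	/* hypothetical helper */
	struct dma_async_tx_descriptor *desc;

	block->bytes_used = block->size;

	desc = dmaengine_prep_slave_single(chan, block->phys_addr,
					   block->bytes_used, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = foo_dma_done;
	desc->callback_param = block;

	/* A real driver would also link the block onto a private list of
	 * in-flight blocks so it can abort them later; omitted here. */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}
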
100 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
103 iio_buffer_put(&block->queue->buffer);
166 struct iio_dma_buffer_queue *queue, size_t size)
174 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
183 block->queue = queue;
187 iio_buffer_get(&queue->buffer);
203 * pass back ownership of the block to the queue.
207 struct iio_dma_buffer_queue *queue = block->queue;
210 spin_lock_irqsave(&queue->list_lock, flags);
212 spin_unlock_irqrestore(&queue->list_lock, flags);
215 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
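
A hedged sketch of the matching completion path, assuming the dmaengine callback from the submit sketch above receives the block as its parameter:

/*
 * Runs from the dmaengine completion callback. Handing the block to
 * iio_dma_buffer_block_done() passes ownership back to the queue, moves the
 * block to the outgoing queue and wakes up any reader polling the buffer.
 */
static void foo_dma_done(void *data)
{
	struct iio_dma_buffer_block *block = data;

	iio_dma_buffer_block_done(block);
}
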
222 * @queue: Queue for which to complete blocks.
223 * @list: List of aborted blocks. All blocks in this list must be from @queue.
227 * hand the blocks back to the queue.
229 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
235 spin_lock_irqsave(&queue->list_lock, flags);
242 spin_unlock_irqrestore(&queue->list_lock, flags);
244 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
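
A hedged sketch of an ->abort() callback that uses this helper. It assumes the driver keeps the blocks it has handed to the hardware on a private 'active' list (a made-up detail, and one the earlier submit sketch omits) and has a dmaengine channel to stop:

static void foo_abort(struct iio_dma_buffer_queue *queue)
{
	struct foo_buffer *foo = foo_queue_to_buffer(queue);	/* hypothetical helper */

	/* Stop the hardware first so no completion races the abort. */
	dmaengine_terminate_sync(foo->chan);

	/* Return every in-flight block to the core in the aborted state. */
	iio_dma_buffer_block_list_abort(queue, &foo->active);
}
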
273 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
285 size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
286 queue->buffer.length, 2);
288 mutex_lock(&queue->lock);
291 if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
294 queue->fileio.block_size = size;
295 queue->fileio.active_block = NULL;
297 spin_lock_irq(&queue->list_lock);
298 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
299 block = queue->fileio.blocks[i];
311 spin_unlock_irq(&queue->list_lock);
313 INIT_LIST_HEAD(&queue->incoming);
315 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
316 if (queue->fileio.blocks[i]) {
317 block = queue->fileio.blocks[i];
330 block = iio_dma_buffer_alloc_block(queue, size);
335 queue->fileio.blocks[i] = block;
339 list_add_tail(&block->head, &queue->incoming);
343 mutex_unlock(&queue->lock);
349 static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
353 spin_lock_irq(&queue->list_lock);
354 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
355 if (!queue->fileio.blocks[i])
357 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
359 spin_unlock_irq(&queue->list_lock);
361 INIT_LIST_HEAD(&queue->incoming);
363 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
364 if (!queue->fileio.blocks[i])
366 iio_buffer_block_put(queue->fileio.blocks[i]);
367 queue->fileio.blocks[i] = NULL;
369 queue->fileio.active_block = NULL;
372 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
382 if (!queue->ops)
387 ret = queue->ops->submit(queue, block);
416 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
419 mutex_lock(&queue->lock);
420 queue->active = true;
421 list_for_each_entry_safe(block, _block, &queue->incoming, head) {
423 iio_dma_buffer_submit_block(queue, block);
425 mutex_unlock(&queue->lock);
442 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
444 mutex_lock(&queue->lock);
445 queue->active = false;
447 if (queue->ops && queue->ops->abort)
448 queue->ops->abort(queue);
449 mutex_unlock(&queue->lock);
455 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
460 } else if (queue->active) {
461 iio_dma_buffer_submit_block(queue, block);
464 list_add_tail(&block->head, &queue->incoming);
469 struct iio_dma_buffer_queue *queue)
474 spin_lock_irq(&queue->list_lock);
476 idx = queue->fileio.next_dequeue;
477 block = queue->fileio.blocks[idx];
480 idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
481 queue->fileio.next_dequeue = idx;
486 spin_unlock_irq(&queue->list_lock);
503 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
510 mutex_lock(&queue->lock);
512 if (!queue->fileio.active_block) {
513 block = iio_dma_buffer_dequeue(queue);
518 queue->fileio.pos = 0;
519 queue->fileio.active_block = block;
521 block = queue->fileio.active_block;
525 if (n > block->bytes_used - queue->fileio.pos)
526 n = block->bytes_used - queue->fileio.pos;
528 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
533 queue->fileio.pos += n;
535 if (queue->fileio.pos == block->bytes_used) {
536 queue->fileio.active_block = NULL;
537 iio_dma_buffer_enqueue(queue, block);
543 mutex_unlock(&queue->lock);
558 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
570 mutex_lock(&queue->lock);
571 if (queue->fileio.active_block)
572 data_available += queue->fileio.active_block->size;
574 spin_lock_irq(&queue->list_lock);
576 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
577 block = queue->fileio.blocks[i];
579 if (block != queue->fileio.active_block
584 spin_unlock_irq(&queue->list_lock);
585 mutex_unlock(&queue->lock);
628 * iio_dma_buffer_init() - Initialize DMA buffer queue
629 * @queue: Buffer to initialize
631 * @ops: DMA buffer queue callback operations
633 * The DMA device will be used by the queue to do DMA memory allocations. So it
637 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
640 iio_buffer_init(&queue->buffer);
641 queue->buffer.length = PAGE_SIZE;
642 queue->buffer.watermark = queue->buffer.length / 2;
643 queue->dev = dev;
644 queue->ops = ops;
646 INIT_LIST_HEAD(&queue->incoming);
648 mutex_init(&queue->lock);
649 spin_lock_init(&queue->list_lock);
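
A minimal setup sketch, assuming the driver embeds the queue in a private foo_buffer structure and supplies the submit/abort callbacks sketched earlier; the device passed in must be the one that will actually perform the DMA:

struct foo_buffer {
	struct iio_dma_buffer_queue queue;
	struct dma_chan *chan;
	struct list_head active;
};

static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
	.submit = foo_submit_block,
	.abort = foo_abort,
};

static int foo_buffer_setup(struct foo_buffer *foo, struct device *dma_dev)
{
	INIT_LIST_HEAD(&foo->active);

	/* Coherent allocations for the blocks will be made against dma_dev. */
	return iio_dma_buffer_init(&foo->queue, dma_dev, &foo_dma_buffer_ops);
}
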
656 * iio_dma_buffer_exit() - Clean up DMA buffer queue
657 * @queue: Buffer to clean up
662 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
664 mutex_lock(&queue->lock);
666 iio_dma_buffer_fileio_free(queue);
667 queue->ops = NULL;
669 mutex_unlock(&queue->lock);
675 * @queue: Buffer to release
681 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
683 mutex_destroy(&queue->lock);
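
And the corresponding teardown, as a sketch building on the foo_buffer above: iio_dma_buffer_exit() when the device goes away, then iio_dma_buffer_release() from the buffer's ->release() callback (wired up through the driver's iio_buffer_access_funcs, not shown) once the last reference has been dropped:

#include <linux/slab.h>

static void foo_buffer_teardown(struct foo_buffer *foo)
{
	iio_dma_buffer_exit(&foo->queue);
	iio_buffer_put(&foo->queue.buffer);	/* drop the driver's reference */
}

static void foo_buffer_release(struct iio_buffer *buffer)
{
	struct foo_buffer *foo =
		container_of(buffer, struct foo_buffer, queue.buffer);

	iio_dma_buffer_release(&foo->queue);
	kfree(foo);
}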