// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample, the overhead will be x for each sample. Whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * makes it possible to achieve much higher sample rates than what can be
 * sustained with the one-sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data from
 *    the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still be
 *    owned by either the application or the DMA controller at the moment. But
 *    once its current owner is done processing it, the block will be freed
 *    instead of going to either the incoming or outgoing queue.
 *
 * In addition, blocks are reference counted and the memory associated with
 * both the block structure as well as the storage memory for the block will be
 * freed when the last reference to the block is dropped. This means a block
 * must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to the actual
 * number of bytes in the buffer. Typically this will be equal to the size of
 * the block, but if the DMA hardware has certain alignment requirements for
 * the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually
 * perform a DMA transfer for the block, e.g. because the buffer was disabled
 * before the block transfer was started. In this case it should set bytes_used
 * to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending and stop active transfers.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */
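
/*
 * A minimal sketch of the driver side of this contract (illustrative only;
 * all foo_* names are hypothetical). The submit() callback starts a transfer
 * into the block's memory and arranges for iio_dma_buffer_block_done() to be
 * called once the hardware is done with it:
 *
 *	static int foo_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		return foo_hw_start_transfer(queue->dev, block->phys_addr,
 *			block->size, foo_dma_complete, block);
 *	}
 */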

static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
					block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DONE;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	if (block->state != IIO_BLOCK_STATE_DEAD)
		block->state = IIO_BLOCK_STATE_DONE;
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
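
/*
 * For illustration, a driver's DMA completion handler (hypothetical foo_*
 * name) typically just fills in bytes_used and hands the block back:
 *
 *	static void foo_dma_complete(void *data)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *
 *		block->bytes_used = block->size;
 *		iio_dma_buffer_block_done(block);
 *	}
 */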

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
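
/*
 * A sketch of a matching abort() implementation for a dmaengine-based driver
 * that keeps its in-flight blocks on a driver-private list (the foo_* names
 * and members are hypothetical):
 *
 *	static void foo_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct foo_buffer *foo = foo_buffer_from_queue(queue);
 *
 *		dmaengine_terminate_sync(foo->chan);
 *		iio_dma_buffer_block_list_abort(queue, &foo->active);
 *	}
 */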

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update() callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
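	/*
	 * For example (illustrative numbers only): with bytes_per_datum == 4
	 * and length == 2048 samples this yields two 4096-byte blocks.
	 */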
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
}

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be the iio_buffer_access_funcs enable callback.
 *
 * This will allocate the DMA buffers and start the DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be the iio_buffer_access_funcs disable callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;
	unsigned int idx;

	spin_lock_irq(&queue->list_lock);

	idx = queue->fileio.next_dequeue;
	block = queue->fileio.blocks[idx];

	if (block->state == IIO_BLOCK_STATE_DONE) {
		idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
		queue->fileio.next_dequeue = idx;
	} else {
		block = NULL;
	}

	spin_unlock_irq(&queue->list_lock);

	return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;
	unsigned int i;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		if (block != queue->fileio.active_block
		    && block->state == IIO_BLOCK_STATE_DONE)
			data_available += block->size;
	}

	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
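
/*
 * A typical driver wires up the defaults from this module roughly like this
 * (a sketch; the exact set of callbacks, the modes value, and any extra
 * release logic depend on the driver):
 *
 *	static const struct iio_buffer_access_funcs foo_buffer_ops = {
 *		.read = iio_dma_buffer_read,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_data_available,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *	};
 */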

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	mutex_lock(&queue->lock);

	iio_dma_buffer_fileio_free(queue);
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");