Lines Matching refs:buf

13 #include <linux/dma-buf.h>
75 struct vb2_dc_buf *buf = buf_priv;
77 return &buf->dma_addr;
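
The matcher strips all surrounding context, so sketches are interleaved below to reconstruct it. The matches are consistent with the vb2 DMA-contig allocator (videobuf2-dma-contig.c in the Linux media subsystem); that attribution, every signature, and every elided line in the sketches are assumptions rather than verbatim source. To start, every field named by the matches fits one state structure, shared by the later sketches along with these includes:

/*
 * Inferred sketch: all field names below appear in the matches; the types
 * and includes are the usual ones for these DMA and dma-buf APIs.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;	/* CPU address from dma_alloc_attrs() */
	dma_addr_t			dma_addr;
	unsigned long			attrs;		/* DMA_ATTR_* flags */
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;		/* USERPTR only */

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;	/* cached for dma-buf export */

	/* DMABUF import related */
	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
	bool				non_coherent_mem;
};

/*
 * Lines 75-77 look like the cookie memop, which hands the bus address back
 * to drivers (function name and signature assumed):
 */
static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
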
96 struct vb2_dc_buf *buf = buf_priv;
98 if (buf->vaddr)
99 return buf->vaddr;
101 if (buf->db_attach) {
104 if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
105 buf->vaddr = map.vaddr;
107 return buf->vaddr;
110 if (buf->non_coherent_mem)
111 buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
112 buf->dma_sgt);
113 return buf->vaddr;
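
Lines 96-113 are the kernel-mapping helper; the match at line 446 confirms its name and signature (vb2_dc_vaddr). A reconstruction: imported dma-bufs are vmapped through the exporter, non-coherent MMAP memory through the DMA API, and the result is cached in buf->vaddr:

static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	/* Reconstruction: only the matched buf-> lines are verbatim. */
	struct vb2_dc_buf *buf = buf_priv;

	if (buf->vaddr)
		return buf->vaddr;	/* already mapped */

	if (buf->db_attach) {
		struct iosys_map map;

		/* imported dma-buf: ask the exporter for a kernel mapping */
		if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
			buf->vaddr = map.vaddr;

		return buf->vaddr;
	}

	/* non-coherent MMAP memory is vmapped lazily via the DMA API */
	if (buf->non_coherent_mem)
		buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
						    buf->dma_sgt);
	return buf->vaddr;
}
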
118 struct vb2_dc_buf *buf = buf_priv;
120 return refcount_read(&buf->refcount);
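
Lines 118-120 read the refcount, presumably the num_users memop; a minimal completion with an assumed name and signature:

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	/* Sketch: name and signature assumed. */
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}
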
125 struct vb2_dc_buf *buf = buf_priv;
126 struct sg_table *sgt = buf->dma_sgt;
129 if (buf->vb->skip_cache_sync_on_prepare)
132 if (!buf->non_coherent_mem)
136 if (buf->vaddr)
137 flush_kernel_vmap_range(buf->vaddr, buf->size);
140 dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
145 struct vb2_dc_buf *buf = buf_priv;
146 struct sg_table *sgt = buf->dma_sgt;
149 if (buf->vb->skip_cache_sync_on_finish)
152 if (!buf->non_coherent_mem)
156 if (buf->vaddr)
157 invalidate_kernel_vmap_range(buf->vaddr, buf->size);
160 dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
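
Lines 125-140 and 145-160 mirror each other: a sync-for-device pass before DMA and a sync-for-CPU pass after it. Reconstructed as a pair (function names assumed); both return early for coherent memory and when the queue requests skipping cache maintenance:

/* Reconstructed pair; only the matched lines are verbatim. */
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	/* coherent memory needs no cache maintenance */
	if (!buf->non_coherent_mem)
		return;

	/* flush CPU writes made through the kernel mapping */
	if (buf->vaddr)
		flush_kernel_vmap_range(buf->vaddr, buf->size);

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_finish)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* drop stale cache lines before the CPU reads the buffer */
	if (buf->vaddr)
		invalidate_kernel_vmap_range(buf->vaddr, buf->size);

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
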
169 struct vb2_dc_buf *buf = buf_priv;
171 if (!refcount_dec_and_test(&buf->refcount))
174 if (buf->non_coherent_mem) {
175 if (buf->vaddr)
176 dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
177 dma_free_noncontiguous(buf->dev, buf->size,
178 buf->dma_sgt, buf->dma_dir);
180 if (buf->sgt_base) {
181 sg_free_table(buf->sgt_base);
182 kfree(buf->sgt_base);
184 dma_free_attrs(buf->dev, buf->size, buf->cookie,
185 buf->dma_addr, buf->attrs);
187 put_device(buf->dev);
188 kfree(buf);
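
Lines 169-188 free an MMAP buffer once the last reference is dropped; the vb2_dc_put name is confirmed by the handler setup at line 266. In this reconstruction the else pairing of the two free paths is inferred:

static void vb2_dc_put(void *buf_priv)
{
	/* Sketch: signature and branch pairing assumed. */
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->non_coherent_mem) {
		if (buf->vaddr)
			dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
		dma_free_noncontiguous(buf->dev, buf->size,
				       buf->dma_sgt, buf->dma_dir);
	} else {
		/* drop the sg table kept around for dma-buf export */
		if (buf->sgt_base) {
			sg_free_table(buf->sgt_base);
			kfree(buf->sgt_base);
		}
		dma_free_attrs(buf->dev, buf->size, buf->cookie,
			       buf->dma_addr, buf->attrs);
	}
	put_device(buf->dev);	/* balances get_device() at alloc time */
	kfree(buf);
}
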
191 static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
193 struct vb2_queue *q = buf->vb->vb2_queue;
195 buf->cookie = dma_alloc_attrs(buf->dev,
196 buf->size,
197 &buf->dma_addr,
199 buf->attrs);
200 if (!buf->cookie)
206 buf->vaddr = buf->cookie;
210 static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
212 struct vb2_queue *q = buf->vb->vb2_queue;
214 buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
215 buf->size,
216 buf->dma_dir,
218 buf->attrs);
219 if (!buf->dma_sgt)
222 buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);
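
In the two allocators (lines 191-222) the matcher drops the GFP argument on source lines 198 and 217 because it does not mention buf. A plausible completion, assuming the queue's gfp_flags are ORed into GFP_KERNEL as is usual for vb2 allocators:

static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->cookie = dma_alloc_attrs(buf->dev,
				      buf->size,
				      &buf->dma_addr,
				      GFP_KERNEL | q->gfp_flags, /* assumed */
				      buf->attrs);
	if (!buf->cookie)
		return -ENOMEM;

	/* assumed: skip the kernel mapping when the queue opted out of it */
	if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return 0;

	buf->vaddr = buf->cookie;
	return 0;
}

static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
					       buf->size,
					       buf->dma_dir,
					       GFP_KERNEL | q->gfp_flags, /* assumed */
					       buf->attrs);
	if (!buf->dma_sgt)
		return -ENOMEM;

	buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);

	/* the kernel mapping is created on demand in vb2_dc_vaddr() */
	return 0;
}
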
235 struct vb2_dc_buf *buf;
241 buf = kzalloc(sizeof *buf, GFP_KERNEL);
242 if (!buf)
245 buf->attrs = vb->vb2_queue->dma_attrs;
246 buf->dma_dir = vb->vb2_queue->dma_dir;
247 buf->vb = vb;
248 buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;
250 buf->size = size;
252 buf->dev = get_device(dev);
254 if (buf->non_coherent_mem)
255 ret = vb2_dc_alloc_non_coherent(buf);
257 ret = vb2_dc_alloc_coherent(buf);
261 kfree(buf);
265 buf->handler.refcount = &buf->refcount;
266 buf->handler.put = vb2_dc_put;
267 buf->handler.arg = buf;
269 refcount_set(&buf->refcount, 1);
271 return buf;
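
Lines 235-271 stitch the helpers into the alloc memop. Reconstruction (signature, error handling, and the put_device() on the failure path are assumptions); the handler wiring at lines 265-267 is what lets the mmap vm_ops release the buffer through vb2_dc_put():

static void *vb2_dc_alloc(struct vb2_buffer *vb, struct device *dev,
			  unsigned long size)
{
	/* Sketch: signature and error path assumed. */
	struct vb2_dc_buf *buf;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = vb->vb2_queue->dma_attrs;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;
	buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;

	buf->size = size;
	/* hold the device while the buffer exists; dropped in vb2_dc_put() */
	buf->dev = get_device(dev);

	if (buf->non_coherent_mem)
		ret = vb2_dc_alloc_non_coherent(buf);
	else
		ret = vb2_dc_alloc_coherent(buf);

	if (ret) {
		put_device(buf->dev);	/* assumed unwind of get_device() */
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}
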
276 struct vb2_dc_buf *buf = buf_priv;
279 if (!buf) {
284 if (buf->non_coherent_mem)
285 ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
286 buf->dma_sgt);
288 ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
289 buf->size, buf->attrs);
296 vma->vm_private_data = &buf->handler;
302 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
303 buf->size);
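
Lines 276-303 are the mmap memop: coherent memory goes through dma_mmap_attrs(), non-coherent through dma_mmap_noncontiguous(), and the vma is tied to the refcount handler from the alloc sketch. The vm_ops wiring and flags on the elided lines are assumptions:

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("no buffer to map\n");
		return -EINVAL;
	}

	if (buf->non_coherent_mem)
		ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
					     buf->dma_sgt);
	else
		ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
				     buf->size, buf->attrs);
	if (ret) {
		pr_err("remapping memory failed, error: %d\n", ret);
		return ret;
	}

	/* assumed; older kernels modify vma->vm_flags directly */
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;	/* assumed: common vb2 refcounting ops */

	vma->vm_ops->open(vma);	/* take a reference for this mapping */

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}
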
324 struct vb2_dc_buf *buf = dbuf->priv;
332 /* Copy the buf->sgt_base scatter list to the attachment, as we can't
335 ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
341 rd = buf->sgt_base->sgl;
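
Lines 324-341 sit in the dma-buf attach callback: each attachment gets a private copy of the exporter's base scatter list, because one sg table cannot be DMA-mapped for several importers at once. A sketch, assuming a per-attachment bookkeeping struct along these lines:

/* Assumed per-attachment state; not visible in the matches. */
struct vb2_dc_attachment {
	struct sg_table			sgt;
	enum dma_data_direction		dma_dir;	/* DMA_NONE until first map */
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
				    struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	struct vb2_dc_buf *buf = dbuf->priv;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	unsigned int i;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* clone buf->sgt_base into the attachment, entry by entry */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}
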
442 struct vb2_dc_buf *buf;
445 buf = dbuf->priv;
446 vaddr = vb2_dc_vaddr(buf->vb, buf);
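
Line 446 shows the dma-buf vmap callback delegating to vb2_dc_vaddr(). A minimal completion (callback name, error value, and the iosys_map packing are assumed):

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf,
				  struct iosys_map *map)
{
	struct vb2_dc_buf *buf = dbuf->priv;
	void *vaddr = vb2_dc_vaddr(buf->vb, buf);	/* verbatim match */

	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}
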
473 static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
478 if (buf->non_coherent_mem)
479 return buf->dma_sgt;
483 dev_err(buf->dev, "failed to alloc sg table\n");
487 ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
488 buf->size, buf->attrs);
490 dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
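
vb2_dc_get_base_sgt() (lines 473-490) produces the sg table that exports and attachments are built from: non-coherent buffers already own one, while for coherent memory the DMA API is asked to describe the allocation. The allocation and returns on the elided lines are filled in as assumptions:

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	/* Sketch: only the matched lines are verbatim. */
	struct sg_table *sgt;
	int ret;

	if (buf->non_coherent_mem)
		return buf->dma_sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}
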
502 struct vb2_dc_buf *buf = buf_priv;
507 exp_info.size = buf->size;
509 exp_info.priv = buf;
511 if (!buf->sgt_base)
512 buf->sgt_base = vb2_dc_get_base_sgt(buf);
514 if (WARN_ON(!buf->sgt_base))
522 refcount_inc(&buf->refcount);
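
Lines 502-522 export the buffer as a dma-buf; the refcount_inc at line 522 keeps the vb2 buffer alive while the exported fd circulates. Sketch (export-info setup on the elided lines, the ops table name, and the dma_buf_export() call are assumptions):

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
					 void *buf_priv,
					 unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;	/* assumed ops table name */
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	/* build (and cache) the base sg table on first export */
	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* the dma-buf holds a reference on the vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
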
533 struct vb2_dc_buf *buf = buf_priv;
534 struct sg_table *sgt = buf->dma_sgt;
543 dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
545 if (buf->dma_dir == DMA_FROM_DEVICE ||
546 buf->dma_dir == DMA_BIDIRECTIONAL) {
547 pages = frame_vector_pages(buf->vec);
550 for (i = 0; i < frame_vector_count(buf->vec); i++)
556 dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
557 buf->dma_dir, 0);
559 vb2_destroy_framevec(buf->vec);
560 kfree(buf);
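
Lines 533-560 tear down a USERPTR buffer. The two branches mirror the two setup paths of the getter below: an sg-table mapping over pinned pages, which must be marked dirty if the device wrote to them, and a direct dma_map_resource() mapping for pfn-contiguous memory. Sketch:

static void vb2_dc_put_userptr(void *buf_priv)
{
	/* Sketch: signature and the sgt NULL-check structure assumed. */
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct page **pages;
	int i;

	if (sgt) {
		/*
		 * No CPU sync needed here: the finish() memop already synced
		 * the buffer back to the CPU.
		 */
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL) {
			pages = frame_vector_pages(buf->vec);
			if (!WARN_ON_ONCE(IS_ERR(pages)))
				for (i = 0; i < frame_vector_count(buf->vec); i++)
					set_page_dirty_lock(pages[i]);
		}
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		/* pfn-contiguous path: no sg table was ever built */
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
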
566 struct vb2_dc_buf *buf;
589 buf = kzalloc(sizeof *buf, GFP_KERNEL);
590 if (!buf)
593 buf->dev = dev;
594 buf->dma_dir = vb->vb2_queue->dma_dir;
595 buf->vb = vb;
598 vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE ||
599 buf->dma_dir == DMA_BIDIRECTIONAL);
604 buf->vec = vec;
617 buf->dma_addr = dma_map_resource(buf->dev,
618 __pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
619 if (dma_mapping_error(buf->dev, buf->dma_addr)) {
644 if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
659 buf->dma_addr = sg_dma_address(sgt->sgl);
660 buf->dma_sgt = sgt;
661 buf->non_coherent_mem = 1;
664 buf->size = size;
666 return buf;
669 dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
681 kfree(buf);
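
The USERPTR getter (lines 566-681) loses the most context to the matcher. A condensed sketch of its control flow: pin the user range, fall back to a direct resource mapping when the pages have no struct page, otherwise build and map an sg table and insist on contiguity. Several validations are abbreviated, and vb2_dc_get_contiguous_size() is an assumed helper sketched with the map_dmabuf reconstruction further down:

/* Condensed sketch: labels, checks, and most error values are assumed. */
static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				unsigned long vaddr, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	struct sg_table *sgt;
	int n_pages, ret;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;

	/* pin the user pages; writable pin if the device will write */
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE ||
					       buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);

	if (frame_vector_to_pages(vec) < 0) {
		/* no struct pages (e.g. pfn-mapped): map the range directly */
		unsigned long *nums = frame_vector_pfns(vec);
		int i;

		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i]) {
				ret = -EINVAL;	/* not physically contiguous */
				goto fail_vec;
			}
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_vec;
		}
		goto out;
	}

	/* normal path: build an sg table over the pinned pages and map it */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto fail_vec;
	}
	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
					offset_in_page(vaddr), size, GFP_KERNEL);
	if (ret)
		goto fail_sgt;

	/* deferred CPU sync: prepare() will do it before DMA starts */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		ret = -EIO;
		goto fail_sgt_init;
	}

	if (vb2_dc_get_contiguous_size(sgt) < size) {
		ret = -EFAULT;
		goto fail_map;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->non_coherent_mem = 1;
out:
	buf->size = size;
	return buf;

fail_map:
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
fail_sgt_init:
	sg_free_table(sgt);
fail_sgt:
	kfree(sgt);
fail_vec:
	vb2_destroy_framevec(vec);
fail_buf:
	kfree(buf);
	return ERR_PTR(ret);
}
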
692 struct vb2_dc_buf *buf = mem_priv;
696 if (WARN_ON(!buf->db_attach)) {
701 if (WARN_ON(buf->dma_sgt)) {
707 sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
715 if (contig_size < buf->size) {
717 contig_size, buf->size);
718 dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt,
719 buf->dma_dir);
723 buf->dma_addr = sg_dma_address(sgt->sgl);
724 buf->dma_sgt = sgt;
725 buf->vaddr = NULL;
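
Lines 692-725 pin an imported dma-buf before I/O; the importer requires the exporter's scatterlist to be contiguous for at least buf->size bytes, since this allocator hands a single bus address to drivers. Sketch, including the assumed contiguity helper referenced above:

/* Assumed helper: length of the DMA-contiguous prefix of an sg table. */
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned long size = 0;
	unsigned int i;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* map the attachment, i.e. get the exporter's scatterlist */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt))
		return -EINVAL;

	/* reject buffers that are not contiguous enough for this allocator */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt,
						  buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;	/* any kernel mapping is created lazily */

	return 0;
}
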
732 struct vb2_dc_buf *buf = mem_priv;
733 struct sg_table *sgt = buf->dma_sgt;
734 struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
736 if (WARN_ON(!buf->db_attach)) {
746 if (buf->vaddr) {
747 dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
748 buf->vaddr = NULL;
750 dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);
752 buf->dma_addr = 0;
753 buf->dma_sgt = NULL;
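
The inverse (lines 732-753; the vb2_dc_unmap_dmabuf name is confirmed by the call at line 762) drops any kernel mapping first, then unmaps the attachment:

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	/* Sketch: WARN messages on the elided lines are assumed. */
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}
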
758 struct vb2_dc_buf *buf = mem_priv;
761 if (WARN_ON(buf->dma_addr))
762 vb2_dc_unmap_dmabuf(buf);
765 dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
766 kfree(buf);
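
Lines 758-766 detach from the exporter; a still-mapped buffer here indicates a missed unmap, hence the defensive WARN_ON path. Sketch:

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	/* Sketch: name and signature assumed. */
	struct vb2_dc_buf *buf = mem_priv;

	/* a mapped buffer here means a missed unmap; recover anyway */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}
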
772 struct vb2_dc_buf *buf;
781 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
782 if (!buf)
785 buf->dev = dev;
786 buf->vb = vb;
789 dba = dma_buf_attach(dbuf, buf->dev);
792 kfree(buf);
796 buf->dma_dir = vb->vb2_queue->dma_dir;
797 buf->size = size;
798 buf->db_attach = dba;
800 return buf;
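
Finally, lines 772-800 create the attachment when a DMABUF buffer is queued. Mapping is deferred: attach only records the attachment, and the pinning happens later in the map step. Sketch (the size check and error returns on the elided lines are assumed):

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				  struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->vb = vb;

	/* create an attachment for this device; no pages are pinned yet */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return ERR_CAST(dba);
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
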