Lines Matching refs:buf
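
All of the matches below dereference the same per-buffer context; judging by the function and field names, they come from the videobuf2 dma-sg allocator (drivers/media/common/videobuf2/videobuf2-dma-sg.c). For orientation, here is a rough sketch of that structure, reconstructed only from the fields referenced in this listing; the real definition may order its members differently and may contain fields that simply never match refs:buf here.

/* Approximate shape of the per-buffer context, inferred from the
 * buf-> references below, not copied from the file. */
struct vb2_dma_sg_buf {
        struct device                   *dev;           /* lines 143, 150, 292 */
        void                            *vaddr;         /* kernel mapping, lines 116, 190-191, 315 */
        struct page                     **pages;        /* page array, lines 85, 93, 129 */
        struct frame_vector             *vec;           /* pinned user pages, lines 249-254, 300 */
        unsigned int                    offset;         /* offset into first page, lines 118, 240 */
        enum dma_data_direction         dma_dir;        /* lines 117, 239 */
        struct sg_table                 sg_table;       /* lines 122, 145, 182 */
        struct sg_table                 *dma_sgt;       /* own table or imported one, lines 122, 573 */
        size_t                          size;           /* lines 119, 241 */
        unsigned int                    num_pages;      /* lines 121, 254 */
        refcount_t                      refcount;       /* lines 159, 185, 329 */
        struct vb2_vmarea_handler       handler;        /* mmap refcounting, lines 154-156, 351 */
        struct dma_buf_attachment       *db_attach;     /* importer side, lines 313, 644 */
        struct vb2_buffer               *vb;            /* owning vb2 buffer, lines 157, 243, 645 */
};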

60 static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
64 unsigned long size = buf->size;
85 __free_page(buf->pages[last_page]);
93 buf->pages[last_page++] = &pages[i];
104 struct vb2_dma_sg_buf *buf;
112 buf = kzalloc(sizeof *buf, GFP_KERNEL);
113 if (!buf)
116 buf->vaddr = NULL;
117 buf->dma_dir = vb->vb2_queue->dma_dir;
118 buf->offset = 0;
119 buf->size = size;
121 buf->num_pages = size >> PAGE_SHIFT;
122 buf->dma_sgt = &buf->sg_table;
129 buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL);
130 if (!buf->pages)
133 ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
137 ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
138 buf->num_pages, 0, size, GFP_KERNEL);
143 buf->dev = get_device(dev);
145 sgt = &buf->sg_table;
150 if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
154 buf->handler.refcount = &buf->refcount;
155 buf->handler.put = vb2_dma_sg_put;
156 buf->handler.arg = buf;
157 buf->vb = vb;
159 refcount_set(&buf->refcount, 1);
162 __func__, buf->num_pages);
163 return buf;
166 put_device(buf->dev);
167 sg_free_table(buf->dma_sgt);
169 num_pages = buf->num_pages;
171 __free_page(buf->pages[num_pages]);
173 kvfree(buf->pages);
175 kfree(buf);
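
Lines 166-175 above are only the matching fragments of the allocation error path; the page-freeing loop does not mention buf on its own line, so it is invisible here. A condensed sketch of the unwind order those lines imply, with guessed label names (the real labels and exact control flow may differ):

fail_map:
        put_device(buf->dev);
        sg_free_table(buf->dma_sgt);
fail_table_alloc:
        num_pages = buf->num_pages;
        while (num_pages--)                     /* loop implied around line 171 */
                __free_page(buf->pages[num_pages]);
fail_pages_alloc:
        kvfree(buf->pages);
fail_pages_array_alloc:
        kfree(buf);
        return ERR_PTR(-ENOMEM);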
181 struct vb2_dma_sg_buf *buf = buf_priv;
182 struct sg_table *sgt = &buf->sg_table;
183 int i = buf->num_pages;
185 if (refcount_dec_and_test(&buf->refcount)) {
187 buf->num_pages);
188 dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
190 if (buf->vaddr)
191 vm_unmap_ram(buf->vaddr, buf->num_pages);
192 sg_free_table(buf->dma_sgt);
194 __free_page(buf->pages[i]);
195 kvfree(buf->pages);
196 put_device(buf->dev);
197 kfree(buf);
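
vb2_dma_sg_put (lines 181-197) tears the buffer down only when refcount_dec_and_test() at line 185 drops the last reference. The page-freeing loop itself does not reference buf, so only its body appears at line 194; it presumably looks like this, with i being the counter declared at line 183 (a sketch):

        while (--i >= 0)
                __free_page(buf->pages[i]);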
203 struct vb2_dma_sg_buf *buf = buf_priv;
204 struct sg_table *sgt = buf->dma_sgt;
206 if (buf->vb->skip_cache_sync_on_prepare)
209 dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
214 struct vb2_dma_sg_buf *buf = buf_priv;
215 struct sg_table *sgt = buf->dma_sgt;
217 if (buf->vb->skip_cache_sync_on_finish)
220 dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
226 struct vb2_dma_sg_buf *buf;
233 buf = kzalloc(sizeof *buf, GFP_KERNEL);
234 if (!buf)
237 buf->vaddr = NULL;
238 buf->dev = dev;
239 buf->dma_dir = vb->vb2_queue->dma_dir;
240 buf->offset = vaddr & ~PAGE_MASK;
241 buf->size = size;
242 buf->dma_sgt = &buf->sg_table;
243 buf->vb = vb;
245 buf->dma_dir == DMA_FROM_DEVICE ||
246 buf->dma_dir == DMA_BIDIRECTIONAL);
249 buf->vec = vec;
251 buf->pages = frame_vector_pages(vec);
252 if (IS_ERR(buf->pages))
254 buf->num_pages = frame_vector_count(vec);
256 if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
257 buf->num_pages, buf->offset, size, 0))
260 sgt = &buf->sg_table;
265 if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
269 return buf;
272 sg_free_table(&buf->sg_table);
276 kfree(buf);
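
Lines 245-246 are orphaned argument fragments: the call they belong to pins the user pages, but its first line does not mention buf, so it is missing from the listing. Reconstructed approximately (vb2_create_framevec lives in videobuf2-memops; the error label is a guess):

        vec = vb2_create_framevec(vaddr, size,
                                  buf->dma_dir == DMA_FROM_DEVICE ||
                                  buf->dma_dir == DMA_BIDIRECTIONAL);
        if (IS_ERR(vec))
                goto userptr_fail_pfnvec;       /* label name assumed */
        buf->vec = vec;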
286 struct vb2_dma_sg_buf *buf = buf_priv;
287 struct sg_table *sgt = &buf->sg_table;
288 int i = buf->num_pages;
291 __func__, buf->num_pages);
292 dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
293 if (buf->vaddr)
294 vm_unmap_ram(buf->vaddr, buf->num_pages);
295 sg_free_table(buf->dma_sgt);
296 if (buf->dma_dir == DMA_FROM_DEVICE ||
297 buf->dma_dir == DMA_BIDIRECTIONAL)
299 set_page_dirty_lock(buf->pages[i]);
300 vb2_destroy_framevec(buf->vec);
301 kfree(buf);
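
As in the other teardown paths, the loop around line 299 is hidden because only its body mentions buf. Before the pinned user pages are released, pages the device may have written are marked dirty; roughly (loop form assumed, i is the counter from line 288):

        if (buf->dma_dir == DMA_FROM_DEVICE ||
            buf->dma_dir == DMA_BIDIRECTIONAL)
                while (--i >= 0)
                        set_page_dirty_lock(buf->pages[i]);
        vb2_destroy_framevec(buf->vec);
        kfree(buf);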
306 struct vb2_dma_sg_buf *buf = buf_priv;
310 BUG_ON(!buf);
312 if (!buf->vaddr) {
313 if (buf->db_attach) {
314 ret = dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map);
315 buf->vaddr = ret ? NULL : map.vaddr;
317 buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
322 return buf->vaddr ? buf->vaddr + buf->offset : NULL;
327 struct vb2_dma_sg_buf *buf = buf_priv;
329 return refcount_read(&buf->refcount);
334 struct vb2_dma_sg_buf *buf = buf_priv;
337 if (!buf) {
342 err = vm_map_pages(vma, buf->pages, buf->num_pages);
351 vma->vm_private_data = &buf->handler;
375 struct vb2_dma_sg_buf *buf = dbuf->priv;
383 /* Copy the buf->base_sgt scatter list to the attachment, as we can't
386 ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
392 rd = buf->dma_sgt->sgl;
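
The comment at line 383 mentions buf->base_sgt, but the table being copied is the buffer's own sg_table (dma_sgt points at it, see line 122); the attachment gets a private copy because one scatterlist cannot be mapped through several attachments at once. Only the first line of the copy loop matches buf, so here is an approximate reconstruction (the wr variable and exact loop shape are assumptions):

        rd = buf->dma_sgt->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }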
469 struct vb2_dma_sg_buf *buf = dbuf->priv;
470 struct sg_table *sgt = buf->dma_sgt;
472 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
480 struct vb2_dma_sg_buf *buf = dbuf->priv;
481 struct sg_table *sgt = buf->dma_sgt;
483 dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
490 struct vb2_dma_sg_buf *buf;
493 buf = dbuf->priv;
494 vaddr = vb2_dma_sg_vaddr(buf->vb, buf);
525 struct vb2_dma_sg_buf *buf = buf_priv;
530 exp_info.size = buf->size;
532 exp_info.priv = buf;
534 if (WARN_ON(!buf->dma_sgt))
542 refcount_inc(&buf->refcount);
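
vb2_dma_sg_get_dmabuf (lines 525-542) fills a dma_buf export descriptor and takes an extra buffer reference once the export succeeds. Only the buf-related assignments match above; a plausible reconstruction of the surrounding code (the ops-struct name and the flags parameter are assumptions):

        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct dma_buf *dbuf;

        exp_info.ops = &vb2_dma_sg_dmabuf_ops;  /* name assumed */
        exp_info.size = buf->size;
        exp_info.flags = flags;                 /* O_CLOEXEC etc., passed in by the caller */
        exp_info.priv = buf;

        if (WARN_ON(!buf->dma_sgt))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* the dmabuf now holds a reference to the vb2 buffer */
        refcount_inc(&buf->refcount);
        return dbuf;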
553 struct vb2_dma_sg_buf *buf = mem_priv;
556 if (WARN_ON(!buf->db_attach)) {
561 if (WARN_ON(buf->dma_sgt)) {
567 sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
573 buf->dma_sgt = sgt;
574 buf->vaddr = NULL;
581 struct vb2_dma_sg_buf *buf = mem_priv;
582 struct sg_table *sgt = buf->dma_sgt;
583 struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
585 if (WARN_ON(!buf->db_attach)) {
595 if (buf->vaddr) {
596 dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
597 buf->vaddr = NULL;
599 dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);
601 buf->dma_sgt = NULL;
606 struct vb2_dma_sg_buf *buf = mem_priv;
609 if (WARN_ON(buf->dma_sgt))
610 vb2_dma_sg_unmap_dmabuf(buf);
613 dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
614 kfree(buf);
620 struct vb2_dma_sg_buf *buf;
629 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
630 if (!buf)
633 buf->dev = dev;
635 dba = dma_buf_attach(dbuf, buf->dev);
638 kfree(buf);
642 buf->dma_dir = vb->vb2_queue->dma_dir;
643 buf->size = size;
644 buf->db_attach = dba;
645 buf->vb = vb;
647 return buf;
652 struct vb2_dma_sg_buf *buf = buf_priv;
654 return buf->dma_sgt;
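
Lines 652-654 are the cookie hook, which just hands the scatter table back to the driver. All of the functions matched above are wired into a vb2_mem_ops table exported by this file; a driver opts into the dma-sg allocator when it configures its queue, roughly:

        /* illustrative queue setup in a driver; the exported ops symbol
         * is vb2_dma_sg_memops */
        q->mem_ops = &vb2_dma_sg_memops;
        q->gfp_flags = GFP_DMA32;       /* optional, example flag only */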