Lines Matching refs:mem (scatterlist references in the mlx4 ICM allocation code: allocation, DMA mapping, freeing, and offset lookup paths)
60 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
64 __free_pages(sg_page(&chunk->mem[i]),
65 get_order(chunk->mem[i].length));
73 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
74 lowmem_page_address(sg_page(&chunk->mem[i])),
75 sg_dma_address(&chunk->mem[i]));
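The matches at lines 60-75 are the teardown side: chunks that were mapped for streaming DMA are unmapped with pci_unmap_sg() and their pages released with __free_pages(), while coherent chunks give each entry back through dma_free_coherent(). Below is a minimal sketch of the two free helpers these fragments appear to come from; the helper names, the struct mlx4_icm_chunk layout, and the npages/nsg fields outside the matched lines are assumptions, not taken from the listing.

/*
 * Assumed context: each chunk carries a fixed scatterlist array plus
 * counters for filled entries (npages) and mapped DMA segments (nsg),
 * roughly:
 *
 *	struct mlx4_icm_chunk {
 *		struct list_head	list;
 *		int			npages;
 *		int			nsg;
 *		struct scatterlist	mem[MLX4_ICM_CHUNK_LEN];
 *	};
 */
static void mlx4_free_icm_pages(struct mlx4_dev *dev,
				struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)			/* was mapped for streaming DMA */
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,	/* line 60 */
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),			/* lines 64-65 */
			     get_order(chunk->mem[i].length));
}

static void mlx4_free_icm_coherent(struct mlx4_dev *dev,
				   struct mlx4_icm_chunk *chunk)
{
	int i;

	/* Coherent entries were never pci_map_sg()'d; each one carries its
	 * own CPU address (via the page) and bus address (sg_dma_address). */
	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,	/* lines 73-75 */
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}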
97 static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
109 sg_set_page(mem, page, PAGE_SIZE << order, 0);
113 static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
117 &sg_dma_address(mem), gfp_mask);
121 sg_set_buf(mem, buf, PAGE_SIZE << order);
122 BUG_ON(mem->offset);
123 sg_dma_len(mem) = PAGE_SIZE << order;
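Lines 97-123 are the per-entry allocators: a scatterlist entry is filled either with freshly allocated pages (to be pci_map_sg()'d later as part of the whole chunk) or with a dma_alloc_coherent() buffer whose bus address is stored directly in the entry. The sketch below fills in the parts the listing truncates; the alloc_pages() call and the error returns are assumptions.

static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,	/* line 97 */
				gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);	/* 2^order contiguous pages */
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);	/* line 109: length, offset 0 */
	return 0;
}

static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,	/* line 113 */
				   int order, gfp_t gfp_mask)
{
	/* The bus address lands straight in the entry's dma_address. */
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);		/* line 117 */
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);	/* line 121 */
	BUG_ON(mem->offset);				/* line 122: coherent buffers are page aligned */
	sg_dma_len(mem) = PAGE_SIZE << order;		/* line 123: no pci_map_sg() for this entry */
	return 0;
}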
163 sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
174 &chunk->mem[chunk->npages],
177 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
193 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
208 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
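Lines 163-208 show how chunks are built and mapped: each new chunk's scatterlist is initialized with sg_init_table(), entries are filled by the two allocators above, and non-coherent chunks are handed to pci_map_sg() either when they reach MLX4_ICM_CHUNK_LEN entries (line 193) or once at the end for the final, partially filled chunk (line 208). Below is a condensed sketch of that allocation loop; the mlx4_alloc_icm()/mlx4_free_icm() names, the icm/chunk bookkeeping fields, and the error unwinding are assumptions, and the real code's retry with a smaller order on allocation failure is simplified to a bail-out.

struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order = get_order(MLX4_ICM_ALLOC_SIZE);
	int ret;

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);	/* line 163 */
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)		/* don't allocate past npages */
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
						      &chunk->mem[chunk->npages],  /* line 174 */
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],	   /* line 177 */
						   cur_order, gfp_mask);
		if (ret)
			goto fail;

		++chunk->npages;

		if (coherent)
			++chunk->nsg;	/* bus address already set, no pci_map_sg() */
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,		   /* line 193 */
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);
			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;	/* start a new chunk next iteration */

		npages -= 1 << cur_order;
	}

	/* Map the final, partially filled chunk. */
	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,			   /* line 208 */
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);
		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);	/* walks chunk_list with the free helpers above */
	return NULL;
}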
329 if (sg_dma_len(&chunk->mem[i]) > dma_offset)
330 *dma_handle = sg_dma_address(&chunk->mem[i]) +
332 dma_offset -= sg_dma_len(&chunk->mem[i]);
339 if (chunk->mem[i].length > offset) {
340 page = sg_page(&chunk->mem[i]);
343 offset -= chunk->mem[i].length;
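Lines 329-343 are the lookup side: given a byte offset into the ICM area, the code walks each chunk's scatterlist with two cursors, dma_offset against sg_dma_len() to resolve the bus address (lines 329-332) and offset against mem[i].length to find the backing page and residual offset (lines 339-343). They are tracked separately because pci_map_sg() may merge entries, so the DMA segment lengths need not match the original entry lengths. Below is a sketch of that walk as a standalone helper; the helper name, its signature, and the list_for_each_entry() chunk iteration are illustrative assumptions (in the driver this loop sits inside the table-lookup routine the matches come from).

/* Hypothetical helper wrapping the matched loop; names are illustrative. */
static void *icm_find_addr(struct mlx4_icm *icm, int offset,
			   dma_addr_t *dma_handle)
{
	struct mlx4_icm_chunk *chunk;
	int dma_offset = offset;
	int i;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			/* Bus-address cursor: advances by DMA segment length. */
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)	/* line 329 */
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;			/* line 330 */
				dma_offset -= sg_dma_len(&chunk->mem[i]);	/* line 332 */
			}

			/* CPU-page cursor: advances by the original entry length. */
			if (chunk->mem[i].length > offset)			/* line 339 */
				return lowmem_page_address(sg_page(&chunk->mem[i])) +	/* line 340 */
					offset;
			offset -= chunk->mem[i].length;				/* line 343 */
		}
	}

	return NULL;
}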