Lines Matching refs:chunk (ICM chunk handling in the mthca InfiniBand driver, drivers/infiniband/hw/mthca/mthca_memfree.c)

47  * per chunk.
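The comment at line 47 caps ICM allocations at a fixed maximum size per chunk. From the accesses matched below (chunk->list, chunk->npages, chunk->nsg, and a chunk->mem[] scatterlist initialized via sg_init_table with MTHCA_ICM_CHUNK_LEN entries), the bookkeeping structures can be sketched roughly as follows; the exact field layout and any fields beyond the matched ones are assumptions:

        /* Sketch of the ICM bookkeeping implied by the matches below.
         * Field layout is an assumption inferred from the uses of
         * chunk->mem, chunk->npages, chunk->nsg, and icm->chunk_list. */
        struct mthca_icm_chunk {
                struct list_head   list;    /* linked into mthca_icm.chunk_list */
                int                npages;  /* scatterlist entries filled */
                int                nsg;     /* entries DMA-mapped (0 = unmapped) */
                struct scatterlist mem[MTHCA_ICM_CHUNK_LEN];
        };

        struct mthca_icm {
                struct list_head chunk_list; /* list of mthca_icm_chunk */
                int              refcount;   /* see the line 402 match below */
        };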
63 static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
67 if (chunk->nsg > 0)
68 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
71 for (i = 0; i < chunk->npages; ++i)
72 __free_pages(sg_page(&chunk->mem[i]),
73 get_order(chunk->mem[i].length));
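The matches at lines 63-73 cover nearly all of mthca_free_icm_pages, so it can be reconstructed with little guesswork; only the braces, the loop variable, and the DMA direction argument are filled in as assumptions:

        static void mthca_free_icm_pages(struct mthca_dev *dev,
                                         struct mthca_icm_chunk *chunk)
        {
                int i;

                /* Tear down the streaming DMA mapping first, if any.
                 * The direction argument is an assumption. */
                if (chunk->nsg > 0)
                        pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                                     PCI_DMA_BIDIRECTIONAL);

                /* Then release each higher-order page allocation. */
                for (i = 0; i < chunk->npages; ++i)
                        __free_pages(sg_page(&chunk->mem[i]),
                                     get_order(chunk->mem[i].length));
        }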
76 static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
80 for (i = 0; i < chunk->npages; ++i) {
81 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
82 lowmem_page_address(sg_page(&chunk->mem[i])),
83 sg_dma_address(&chunk->mem[i]));
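Lines 76-83 give the coherent counterpart. Coherent chunks are never passed through pci_map_sg (note that nsg is simply incremented at line 184 instead), so each scatterlist entry is released individually with the bus address it was allocated with:

        static void mthca_free_icm_coherent(struct mthca_dev *dev,
                                            struct mthca_icm_chunk *chunk)
        {
                int i;

                /* Each entry came from a coherent allocation and carries
                 * its own bus address; free them one by one. */
                for (i = 0; i < chunk->npages; ++i)
                        dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                                          lowmem_page_address(sg_page(&chunk->mem[i])),
                                          sg_dma_address(&chunk->mem[i]));
        }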
89 struct mthca_icm_chunk *chunk, *tmp;
94 list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
96 mthca_free_icm_coherent(dev, chunk);
98 mthca_free_icm_pages(dev, chunk);
100 kfree(chunk);
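Lines 89-100 are the dispatcher that walks the chunk list and frees each chunk by the matching method. A sketch, plausibly named mthca_free_icm given the helpers it calls; the coherent flag, the NULL guard, and the trailing kfree(icm) are assumptions about the unmatched lines:

        void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm,
                            int coherent)
        {
                struct mthca_icm_chunk *chunk, *tmp;

                if (!icm)
                        return;

                /* _safe iteration: each chunk is kfree'd inside the loop. */
                list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                        if (coherent)
                                mthca_free_icm_coherent(dev, chunk);
                        else
                                mthca_free_icm_pages(dev, chunk);

                        kfree(chunk);
                }

                kfree(icm);
        }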
140 struct mthca_icm_chunk *chunk = NULL;
157 if (!chunk) {
158 chunk = kmalloc(sizeof *chunk,
160 if (!chunk)
163 sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
164 chunk->npages = 0;
165 chunk->nsg = 0;
166 list_add_tail(&chunk->list, &icm->chunk_list);
174 &chunk->mem[chunk->npages],
177 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
181 ++chunk->npages;
184 ++chunk->nsg;
185 else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
186 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
187 chunk->npages,
190 if (chunk->nsg <= 0)
194 if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
195 chunk = NULL;
205 if (!coherent && chunk) {
206 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
207 chunk->npages,
210 if (chunk->nsg <= 0)
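Lines 140-210 trace the allocator's chunk-filling loop: start a chunk lazily, append one higher-order allocation per iteration, map a chunk for DMA the moment it fills, and, in the non-coherent case, map the final partially filled chunk after the loop. A condensed sketch of that control flow; the npages countdown, the cur_order fallback, the gfp-flag masking, the coherent helper's name, and the fail path are assumptions about the unmatched lines:

        while (npages > 0) {
                if (!chunk) {
                        /* Lazily start a new chunk and queue it on the ICM. */
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                if (coherent)
                        ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
                                                       &chunk->mem[chunk->npages],
                                                       cur_order, gfp_mask);
                else
                        ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                    cur_order, gfp_mask);

                if (!ret) {
                        ++chunk->npages;

                        if (coherent)
                                ++chunk->nsg;   /* coherent allocs map themselves */
                        else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
                                /* Chunk is full: set up its streaming mapping. */
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);
                                if (chunk->nsg <= 0)
                                        goto fail;
                        }

                        if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
                                chunk = NULL;   /* force a fresh chunk next time */

                        npages -= 1 << cur_order;
                } else if (--cur_order < 0)
                        goto fail;              /* retry at a smaller order */
        }

        /* Non-coherent case: map the last, partially filled chunk. */
        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);
                if (chunk->nsg <= 0)
                        goto fail;
        }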
280 struct mthca_icm_chunk *chunk;
296 list_for_each_entry(chunk, &icm->chunk_list, list) {
297 for (i = 0; i < chunk->npages; ++i) {
299 if (sg_dma_len(&chunk->mem[i]) > dma_offset)
300 *dma_handle = sg_dma_address(&chunk->mem[i]) +
302 dma_offset -= sg_dma_len(&chunk->mem[i]);
307 if (chunk->mem[i].length > offset) {
308 page = sg_page(&chunk->mem[i]);
311 offset -= chunk->mem[i].length;
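Lines 280-311 are the lookup walk in what is evidently a table-lookup helper, plausibly mthca_table_find: given a byte offset into the table, scan chunks until the offset lands inside one scatterlist entry. The CPU offset and the DMA offset are tracked separately because pci_map_sg may merge entries, so DMA lengths need not match CPU lengths one for one. A sketch of the loop; the dma_offset setup before it and the out label are assumptions:

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        /* DMA-side walk: the IOMMU may have merged entries,
                         * so use sg_dma_len, not the CPU-side length. */
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }

                        /* CPU-side walk: stop at the entry containing offset. */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }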
402 * Add a reference to this ICM chunk so that it never
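The final match, line 402, is the head of a comment about pinning: ICM that backs reserved firmware objects must never be freed, so its reference count is raised once and never dropped. A hedged one-line illustration of the pattern; the table->icm[i] indexing is an assumption for illustration, consistent with the icm->refcount field sketched above:

        /*
         * Add a reference to this ICM chunk so that it never
         * gets freed (it backs reserved firmware objects; this
         * completion of the truncated comment is an assumption).
         */
        ++table->icm[i]->refcount;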