Lines matching refs:cookie in drivers/iommu/dma-iommu.c (Linux kernel DMA-IOMMU layer); each entry below is the source line number followed by the matched line.

147 static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
149 u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
160 free_iova_fast(&cookie->iovad,
168 static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
173 fq_ring_free_locked(cookie, fq);
177 static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
179 atomic64_inc(&cookie->fq_flush_start_cnt);
180 cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
181 atomic64_inc(&cookie->fq_flush_finish_cnt);
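
fq_flush_iotlb() brackets the driver's domain-wide flush between the two generation counters, which is what makes the finish-count check above safe. A sketch of the protocol, with flush_iotlb_all() as a stub for the driver callback seen on line 180:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t fq_flush_start_cnt, fq_flush_finish_cnt;

static void flush_iotlb_all(void) { /* stub for the driver's IOTLB flush */ }

/* Bump start_cnt before the flush and finish_cnt after it. An entry
 * stamped with a value below finish_cnt was therefore queued before a
 * flush that started later and has now fully completed. */
static void fq_flush_iotlb(void)
{
    atomic_fetch_add(&fq_flush_start_cnt, 1);
    flush_iotlb_all();
    atomic_fetch_add(&fq_flush_finish_cnt, 1);
}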
186 struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
189 atomic_set(&cookie->fq_timer_on, 0);
190 fq_flush_iotlb(cookie);
192 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
193 fq_ring_free(cookie, cookie->single_fq);
196 fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
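
fq_flush_timeout() is the timer callback behind lines 186-196: it disarms fq_timer_on, flushes once, then drains either the single queue or every per-CPU queue. The ordering of the first two steps is the subtle part; a compact model, with stubs standing in for the flush and drain shown above:

#include <stdatomic.h>

static _Atomic int fq_timer_on;

static void fq_flush_iotlb(void)   { /* stub: bump generations, flush IOTLB */ }
static void fq_ring_free_all(void) { /* stub: drain single or per-CPU rings */ }

/* Clear the "timer armed" flag before flushing: a queue_iova() racing
 * with this callback then re-arms the timer for its new entry instead
 * of assuming this already-running instance will cover it. */
static void fq_flush_timeout(void)
{
    atomic_store(&fq_timer_on, 0);
    fq_flush_iotlb();
    fq_ring_free_all();
}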
200 static void queue_iova(struct iommu_dma_cookie *cookie,
217 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
218 fq = cookie->single_fq;
220 fq = raw_cpu_ptr(cookie->percpu_fq);
229 fq_ring_free_locked(cookie, fq);
232 fq_flush_iotlb(cookie);
233 fq_ring_free_locked(cookie, fq);
240 fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
246 if (!atomic_read(&cookie->fq_timer_on) &&
247 !atomic_xchg(&cookie->fq_timer_on, 1))
248 mod_timer(&cookie->fq_timer,
249 jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
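
queue_iova() ties the pieces together: pick the single or per-CPU ring, reap entries already covered by a finished flush, force a synchronous flush only if the ring is still full, stamp the new entry with the current fq_flush_start_cnt, and lazily arm the timer. A userspace model of that flow, with a pthread mutex in place of the fq spinlock and hypothetical stubs for the kernel calls; the plain-read-then-xchg pair mirrors lines 246-247 and keeps the common already-armed case from writing a shared cache line:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define FQ_SIZE 8

struct fq_entry { uint64_t counter; unsigned long iova_pfn; };

struct fq {
    pthread_mutex_t lock;
    unsigned int head, tail;
    struct fq_entry entries[FQ_SIZE];
};

static _Atomic uint64_t fq_flush_start_cnt;
static _Atomic int fq_timer_on;

static void fq_flush_iotlb(void) { /* stub: bump generations, flush IOTLB */ }
static void fq_ring_free_locked(struct fq *fq) { (void)fq; /* stub: reap finished entries */ }
static void arm_timer(void) { /* stub for mod_timer() */ }

static bool fq_full(struct fq *fq)
{
    return (fq->tail + 1) % FQ_SIZE == fq->head;
}

static void queue_iova(struct fq *fq, unsigned long pfn)
{
    pthread_mutex_lock(&fq->lock);

    fq_ring_free_locked(fq);        /* reap entries another CPU already flushed */
    if (fq_full(fq)) {
        fq_flush_iotlb();           /* no room left: flush now, then reap */
        fq_ring_free_locked(fq);
    }

    fq->entries[fq->tail].iova_pfn = pfn;
    fq->entries[fq->tail].counter  = atomic_load(&fq_flush_start_cnt);
    fq->tail = (fq->tail + 1) % FQ_SIZE;

    pthread_mutex_unlock(&fq->lock);

    /* The cheap read screens out the common armed case; the exchange
     * guarantees exactly one racing caller actually arms the timer. */
    if (!atomic_load(&fq_timer_on) && !atomic_exchange(&fq_timer_on, 1))
        arm_timer();
}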
276 static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
278 if (!cookie->fq_domain)
281 del_timer_sync(&cookie->fq_timer);
282 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
283 iommu_dma_free_fq_single(cookie->single_fq);
285 iommu_dma_free_fq_percpu(cookie->percpu_fq);
302 static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
304 size_t fq_size = cookie->options.fq_size;
311 cookie->single_fq = queue;
316 static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
318 size_t fq_size = cookie->options.fq_size;
329 cookie->percpu_fq = queue;
336 struct iommu_dma_cookie *cookie = domain->iova_cookie;
339 if (cookie->fq_domain)
342 atomic64_set(&cookie->fq_flush_start_cnt, 0);
343 atomic64_set(&cookie->fq_flush_finish_cnt, 0);
345 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
346 rc = iommu_dma_init_fq_single(cookie);
348 rc = iommu_dma_init_fq_percpu(cookie);
355 timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
356 atomic_set(&cookie->fq_timer_on, 0);
362 WRITE_ONCE(cookie->fq_domain, domain);
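
iommu_dma_init_fq() (lines 336-362) zeroes both counters, allocates the single or per-CPU queues, sets up the timer, and only then publishes the domain with WRITE_ONCE(); the READ_ONCE() on line 839 is the consuming side. A sketch of that initialize-then-publish pattern, using C11 relaxed atomics as a loose stand-in for WRITE_ONCE/READ_ONCE (in the kernel the ordering additionally leans on a full barrier in queue_iova(), which this model does not reproduce):

#include <stdatomic.h>

struct domain;  /* opaque stand-in for struct iommu_domain */

struct ctx {
    _Atomic(struct domain *) fq_domain;  /* NULL until the flush queue is live */
    /* ... generation counters, ring(s), timer ... */
};

/* Initialize everything, then publish: a reader that observes a non-NULL
 * fq_domain may assume the whole flush-queue machinery already exists. */
static int init_fq(struct ctx *ctx, struct domain *domain)
{
    if (atomic_load_explicit(&ctx->fq_domain, memory_order_relaxed))
        return 0;                        /* already set up, nothing to do */

    /* ... zero generation counters, allocate ring(s), set up timer ... */

    atomic_store_explicit(&ctx->fq_domain, domain, memory_order_relaxed);
    return 0;
}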
366 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
368 if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
369 return cookie->iovad.granule;
375 struct iommu_dma_cookie *cookie;
377 cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
378 if (cookie) {
379 INIT_LIST_HEAD(&cookie->msi_page_list);
380 cookie->type = type;
382 return cookie;
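
cookie_alloc() is the common constructor for both cookie flavors: a zeroed allocation, an empty MSI page list, and the type tag that later code branches on. A minimal model of the same shape; the list type here is a hand-rolled stand-in for the kernel's list_head:

#include <stdlib.h>

enum cookie_type { IOMMU_DMA_IOVA_COOKIE, IOMMU_DMA_MSI_COOKIE };

struct msi_list { struct msi_list *prev, *next; };  /* stand-in for list_head */

struct dma_cookie {
    enum cookie_type type;
    struct msi_list msi_page_list;
    unsigned long msi_iova;   /* used only by the MSI flavor */
};

static struct dma_cookie *cookie_alloc(enum cookie_type type)
{
    struct dma_cookie *cookie = calloc(1, sizeof(*cookie));

    if (cookie) {
        /* empty circular list, like INIT_LIST_HEAD() */
        cookie->msi_page_list.prev = &cookie->msi_page_list;
        cookie->msi_page_list.next = &cookie->msi_page_list;
        cookie->type = type;
    }
    return cookie;
}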
416 struct iommu_dma_cookie *cookie;
424 cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
425 if (!cookie)
428 cookie->msi_iova = base;
429 domain->iova_cookie = cookie;
441 struct iommu_dma_cookie *cookie = domain->iova_cookie;
444 if (!cookie)
447 if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
448 iommu_dma_free_fq(cookie);
449 put_iova_domain(&cookie->iovad);
452 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
456 kfree(cookie);
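
iommu_put_dma_cookie() is the destructor: for an initialized IOVA cookie it tears down the flush queue first (iommu_dma_free_fq() on line 281 cancels the timer synchronously via del_timer_sync() before the rings are freed), then releases the IOVA domain, frees the cached MSI pages, and finally the cookie. A sketch of that ordering, with stub helpers standing in for the kernel calls:

#include <stdlib.h>

struct cookie;  /* fields as in the cookie_alloc() sketch above */

static void free_fq(struct cookie *c)         { (void)c; /* stub: cancel timer, free rings */ }
static void put_iova_domain(struct cookie *c) { (void)c; /* stub */ }
static void free_msi_pages(struct cookie *c)  { (void)c; /* stub: walk msi_page_list */ }

/* Teardown must run in this order: the timer callback dereferences the
 * rings, so it is cancelled before they go away, and the MSI pages are
 * freed before the cookie that anchors their list. */
static void put_dma_cookie(struct cookie *c)
{
    if (!c)
        return;
    free_fq(c);          /* no-op unless a flush queue was set up */
    put_iova_domain(c);
    free_msi_pages(c);
    free(c);
}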
481 static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
484 struct iova_domain *iovad = &cookie->iovad;
499 list_add(&msi_page->list, &cookie->msi_page_list);
564 struct iommu_dma_cookie *cookie = domain->iova_cookie;
565 struct iova_domain *iovad = &cookie->iovad;
589 ret = cookie_init_hw_msi_region(cookie, region->start,
675 struct iommu_dma_cookie *cookie = domain->iova_cookie;
680 if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
683 iovad = &cookie->iovad;
702 mutex_lock(&cookie->mutex);
720 iommu_dma_init_options(&cookie->options, dev);
730 mutex_unlock(&cookie->mutex);
766 struct iommu_dma_cookie *cookie = domain->iova_cookie;
767 struct iova_domain *iovad = &cookie->iovad;
770 if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
771 cookie->msi_iova += size;
772 return cookie->msi_iova - size;
809 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
812 struct iova_domain *iovad = &cookie->iovad;
815 if (cookie->type == IOMMU_DMA_MSI_COOKIE)
816 cookie->msi_iova -= size;
818 queue_iova(cookie, iova_pfn(iovad, iova),
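
For the MSI flavor, allocation and free are a bump pointer and its rewind (lines 771-772 and 815-816); this works because an MSI cookie only ever releases its most recent allocation, during cleanup. A self-contained model of the pair:

struct msi_cookie {
    unsigned long msi_iova;   /* next free offset above the base */
};

/* Bump allocator: hand out [msi_iova, msi_iova + size) and advance. */
static unsigned long msi_alloc_iova(struct msi_cookie *c, unsigned long size)
{
    c->msi_iova += size;
    return c->msi_iova - size;
}

/* Only the most recent allocation is ever freed, so rewinding works. */
static void msi_free_iova(struct msi_cookie *c, unsigned long size)
{
    c->msi_iova -= size;
}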
830 struct iommu_dma_cookie *cookie = domain->iova_cookie;
831 struct iova_domain *iovad = &cookie->iovad;
839 iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
846 iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
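
The unmap path (lines 830-846) chooses between eager and deferred invalidation by reading fq_domain: a non-NULL value, published by iommu_dma_init_fq(), sets the gather's queued flag, and the IOVA is then routed through queue_iova() rather than freed immediately. A condensed model of that branch; the names and the flattened flow are stand-ins for the real __iommu_dma_unmap()/iommu_dma_free_iova() split:

#include <stdbool.h>
#include <stddef.h>

struct gather { bool queued; };

static void *fq_domain;  /* published once the flush queue is live */

static void defer_iova(unsigned long iova)    { (void)iova; /* stub: queue_iova() */ }
static void free_iova_now(unsigned long iova) { (void)iova; /* stub: immediate free */ }

static void dma_unmap(unsigned long iova)
{
    struct gather g = { .queued = fq_domain != NULL };

    /* ... unmap the pagetable entries, accumulating into the gather ... */

    if (g.queued)
        defer_iova(iova);       /* recycled after the next IOTLB flush */
    else
        free_iova_now(iova);    /* IOTLB already synced, safe right away */
}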
853 struct iommu_dma_cookie *cookie = domain->iova_cookie;
854 struct iova_domain *iovad = &cookie->iovad;
874 iommu_dma_free_iova(cookie, iova, size, NULL);
948 struct iommu_dma_cookie *cookie = domain->iova_cookie;
949 struct iova_domain *iovad = &cookie->iovad;
1012 iommu_dma_free_iova(cookie, iova, size, NULL);
1147 struct iommu_dma_cookie *cookie = domain->iova_cookie;
1148 struct iova_domain *iovad = &cookie->iovad;
1354 struct iommu_dma_cookie *cookie = domain->iova_cookie;
1355 struct iova_domain *iovad = &cookie->iovad;
1461 iommu_dma_free_iova(cookie, iova, iova_len, NULL);
1778 struct iommu_dma_cookie *cookie = domain->iova_cookie;
1782 size_t size = cookie_msi_granule(cookie);
1785 list_for_each_entry(msi_page, &cookie->msi_page_list, list)
1803 list_add(&msi_page->list, &cookie->msi_page_list);
1807 iommu_dma_free_iova(cookie, iova, size, NULL);
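
The last cluster of matches is the MSI doorbell cache: iommu_dma_get_msi_page() walks msi_page_list for an existing mapping of the same physical page (line 1785), and only on a miss allocates an IOVA, maps it, and inserts a new entry (line 1803), releasing the IOVA again if the mapping fails (line 1807). A sketch of that lookup-or-insert shape, with a singly linked list and stubbed helpers in place of the kernel's list_head and iommu_map():

#include <stdlib.h>

struct msi_page {
    struct msi_page *next;
    unsigned long phys;   /* doorbell page frame */
    unsigned long iova;   /* where it is mapped in the device's space */
};

static struct msi_page *msi_pages;  /* head of the per-cookie cache */

static long map_page(unsigned long phys, unsigned long iova) { (void)phys; (void)iova; return 0; } /* stub */
static unsigned long alloc_iova(void)      { return 0x1000; }  /* stub */
static void free_iova(unsigned long iova)  { (void)iova; }     /* stub */

/* Return a cached mapping for this doorbell page, or create one. */
static struct msi_page *get_msi_page(unsigned long phys)
{
    struct msi_page *p;

    for (p = msi_pages; p; p = p->next)
        if (p->phys == phys)
            return p;               /* hit: reuse the existing mapping */

    p = calloc(1, sizeof(*p));
    if (!p)
        return NULL;

    p->phys = phys;
    p->iova = alloc_iova();
    if (map_page(phys, p->iova) < 0) {
        free_iova(p->iova);         /* undo the allocation, as on line 1807 */
        free(p);
        return NULL;
    }
    p->next = msi_pages;
    msi_pages = p;
    return p;
}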