Lines Matching defs:cd
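These matches come from the Linux kernel's SMMUv3 shared-virtual-addressing (SVA) code, by all appearances drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c; the leading number on each line is the source line reported by the search. The cd being tracked is a struct arm_smmu_ctx_desc, the refcounted context descriptor that ties a process mm to an SMMU ASID so a device can walk the same page tables as the CPU. A short reconstructed sketch follows each group of matches.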
18 struct arm_smmu_ctx_desc *cd;
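The first match (source line 18) is most likely the cd member of the per-bond notifier structure. A minimal sketch of that structure, with the field set taken from mainline trees of this vintage rather than from the listing itself:

    struct arm_smmu_mmu_notifier {
            struct mmu_notifier             mn;
            struct arm_smmu_ctx_desc        *cd;      /* shared descriptor, one per mm */
            bool                            cleared;  /* mm has exited; CD already quiesced */
            refcount_t                      refs;
            struct list_head                list;     /* entry in the domain's notifier list */
            struct arm_smmu_domain          *domain;
    };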
70 struct arm_smmu_ctx_desc *cd;
74 cd = xa_load(&arm_smmu_asid_xa, asid);
75 if (!cd)
78 if (cd->mm) {
79 if (WARN_ON(cd->mm != mm))
81 /* All devices bound to this mm use the same cd struct. */
82 refcount_inc(&cd->refs);
83 return cd;
86 smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
89 ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
98 cd->asid = new_asid;
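Lines 70-98 fall in arm_smmu_share_asid(), which resolves the case where the ASID the arch allocated for this mm is already present in arm_smmu_asid_xa: either the same mm holds it (take a reference, since all devices bound to one mm share one cd) or a private domain owns it and must be migrated to a fresh ASID. A condensed sketch built around the matched lines; the XA_LIMIT bound, error codes, and the elided tail are assumptions from the mainline version:

    static struct arm_smmu_ctx_desc *
    arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
    {
            int ret;
            u32 new_asid;
            struct arm_smmu_ctx_desc *cd;
            struct arm_smmu_domain *smmu_domain;

            cd = xa_load(&arm_smmu_asid_xa, asid);
            if (!cd)
                    return NULL;            /* ASID is free for the taking */

            if (cd->mm) {
                    if (WARN_ON(cd->mm != mm))
                            return ERR_PTR(-EINVAL);
                    /* All devices bound to this mm use the same cd struct. */
                    refcount_inc(&cd->refs);
                    return cd;
            }

            /* A private domain owns the ASID; move that domain to a new one. */
            smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
            ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
                           XA_LIMIT(1, (1 << smmu_domain->smmu->asid_bits) - 1),
                           GFP_KERNEL);
            if (ret)
                    return ERR_PTR(-ENOSPC);

            cd->asid = new_asid;
            /* ... then rewrite the domain's CD entries, invalidate the TLB for
             * the old ASID, and xa_erase() it so the caller may claim it. */
            return NULL;
    }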
194 struct arm_smmu_ctx_desc *cd;
206 cd = kzalloc(sizeof(*cd), GFP_KERNEL);
207 if (!cd) {
212 refcount_set(&cd->refs, 1);
221 err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
227 cd->asid = asid;
228 cd->mm = mm;
230 return cd;
233 arm_smmu_free_asid(cd);
235 kfree(cd);
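Lines 194-235 are arm_smmu_alloc_shared_cd(): pin the mm and its arm64 ASID, allocate the descriptor, let arm_smmu_share_asid() resolve any collision, then publish the descriptor in arm_smmu_asid_xa under arm_smmu_asid_lock. A condensed sketch; the locking and error unwinding mirror the mainline version and are not all visible in the matches:

    static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
    {
            u16 asid;
            int err = 0;
            struct arm_smmu_ctx_desc *cd;
            struct arm_smmu_ctx_desc *ret = NULL;

            mmgrab(mm);                       /* don't free the mm while we hold the ASID */

            asid = arm64_mm_context_get(mm);  /* pin the CPU ASID of this mm */
            if (!asid) {
                    err = -ESRCH;
                    goto out_drop_mm;
            }

            cd = kzalloc(sizeof(*cd), GFP_KERNEL);
            if (!cd) {
                    err = -ENOMEM;
                    goto out_put_context;
            }

            refcount_set(&cd->refs, 1);

            mutex_lock(&arm_smmu_asid_lock);
            ret = arm_smmu_share_asid(mm, asid);
            if (ret) {
                    mutex_unlock(&arm_smmu_asid_lock);
                    goto out_free_cd;
            }

            err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
            mutex_unlock(&arm_smmu_asid_lock);
            if (err)
                    goto out_free_asid;

            cd->asid = asid;
            cd->mm = mm;
            return cd;

    out_free_asid:
            arm_smmu_free_asid(cd);           /* line 233: drop the xa slot again */
    out_free_cd:
            kfree(cd);                        /* line 235 */
    out_put_context:
            arm64_mm_context_put(mm);
    out_drop_mm:
            mmdrop(mm);
            return err < 0 ? ERR_PTR(err) : ret;
    }

Note the dual return: if arm_smmu_share_asid() found an existing descriptor for this mm, the freshly allocated one is discarded and the shared one is returned instead.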
243 static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
245 if (arm_smmu_free_asid(cd)) {
247 arm64_mm_context_put(cd->mm);
248 mmdrop(cd->mm);
249 kfree(cd);
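Lines 243-249 cover essentially all of arm_smmu_free_shared_cd(). arm_smmu_free_asid() returns true only when the descriptor's last reference is dropped, so the mm and ASID pins taken in arm_smmu_alloc_shared_cd() are released exactly once:

    static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
    {
            if (arm_smmu_free_asid(cd)) {
                    /* Unpin ASID */
                    arm64_mm_context_put(cd->mm);
                    mmdrop(cd->mm);
                    kfree(cd);
            }
    }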
288 smmu_mn->cd->asid);
291 smmu_mn->cd->asid,
314 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
325 arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
331 arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
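Lines 288-331 sit in the mmu-notifier callbacks: 288 and 291 are the ASID and range invalidations issued from the invalidate hook, and 314-331 belong to the release path that runs when the mm exits. Because DMA may still be in flight, the CD is not torn down; it is rewritten as valid but quiescent (a NULL mm keeps the same ASID with translation disabled), then the TLB and ATCs are invalidated. A condensed sketch assuming the mainline CD-writer helpers of this era; the sva_lock and devices_lock that mainline holds here are elided:

    static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
            struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
            struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
            struct arm_smmu_master *master;

            /* ... return early if smmu_mn->cleared is already set ... */

            /*
             * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
             * but disable translation.
             */
            list_for_each_entry(master, &smmu_domain->devices, domain_head) {
                    struct arm_smmu_cd target;
                    struct arm_smmu_cd *cdptr;

                    cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
                    if (WARN_ON(!cdptr))
                            continue;
                    /* A NULL mm yields a quiescent CD under the same ASID. */
                    arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
                    arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
                                            &target);
            }

            arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
            arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
            smmu_mn->cleared = true;
    }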
355 struct arm_smmu_ctx_desc *cd;
365 cd = arm_smmu_alloc_shared_cd(mm);
366 if (IS_ERR(cd))
367 return ERR_CAST(cd);
376 smmu_mn->cd = cd;
390 arm_smmu_free_shared_cd(cd);
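Lines 355-390 are arm_smmu_mmu_notifier_get(): if no notifier exists yet for this mm, allocate the shared descriptor, then the notifier that owns it, and register the latter with the mm; on failure the descriptor's reference is dropped again. A condensed sketch along mainline lines (the existing-notifier lookup is abbreviated):

    static struct arm_smmu_mmu_notifier *
    arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
                              struct mm_struct *mm)
    {
            int ret;
            struct arm_smmu_ctx_desc *cd;
            struct arm_smmu_mmu_notifier *smmu_mn;

            /* ... return (and refcount_inc) an existing notifier for this mm ... */

            cd = arm_smmu_alloc_shared_cd(mm);
            if (IS_ERR(cd))
                    return ERR_CAST(cd);

            smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
            if (!smmu_mn) {
                    ret = -ENOMEM;
                    goto err_free_cd;
            }

            refcount_set(&smmu_mn->refs, 1);
            smmu_mn->cd = cd;
            smmu_mn->domain = smmu_domain;
            smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

            ret = mmu_notifier_register(&smmu_mn->mn, mm);
            if (ret) {
                    kfree(smmu_mn);
                    goto err_free_cd;
            }

            list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
            return smmu_mn;

    err_free_cd:
            arm_smmu_free_shared_cd(cd);      /* line 390 */
            return ERR_PTR(ret);
    }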
397 struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
410 arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
417 arm_smmu_free_shared_cd(cd);
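Lines 397-417 are the teardown counterpart, arm_smmu_mmu_notifier_put(): on the last reference the ASID is invalidated, unless the release hook already quiesced the CD (smmu_mn->cleared), and then both the notifier and the shared descriptor are dropped. A condensed sketch along mainline lines:

    static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
    {
            struct mm_struct *mm = smmu_mn->mn.mm;
            struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
            struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

            if (!refcount_dec_and_test(&smmu_mn->refs))
                    return;

            list_del(&smmu_mn->list);

            /* If release() ran, the TLB has already been invalidated. */
            if (!smmu_mn->cleared) {
                    arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
                    arm_smmu_atc_inv_domain(smmu_domain,
                                            mm_get_enqcmd_pasid(mm), 0, 0);
            }

            /* Frees smmu_mn */
            mmu_notifier_put(&smmu_mn->mn);
            arm_smmu_free_shared_cd(cd);      /* line 417 */
    }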
457 arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
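The final match, line 457, is the attach side: when a PASID is bound to a device, the SVA CD is built from the live mm and the bond's shared ASID, then installed in the device's CD table. A minimal sketch of that step; the cdptr and pasid plumbing around it is assumed from the surrounding mainline code:

    struct arm_smmu_cd target;

    /* Build a CD that walks the mm's page tables under the bond's ASID... */
    arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
    /* ...and install it at this PASID's slot in the device's CD table. */
    arm_smmu_write_cd_entry(master, pasid, cdptr, &target);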