Lines matching refs:ctx in the arm64 KVM page-table walker code (arch/arm64/kvm/hyp/pgtable.c)

70 static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
72 return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
75 static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
77 return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
88 static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
90 u64 granule = kvm_granule_size(ctx->level);
92 if (!kvm_level_supports_block_mapping(ctx->level))
95 if (granule > (ctx->end - ctx->addr))
101 return IS_ALIGNED(ctx->addr, granule);
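With 4KiB translation granules, kvm_granule_size() works out to 1GiB at level 1, 2MiB at level 2 and 4KiB at level 3, so the check above only admits a block mapping when the whole block fits inside the remaining walk range and the address sits on a block boundary. A hedged restatement of that arithmetic, using only the ctx fields visible above and the helpers from <asm/kvm_pgtable.h>; the full function additionally requires the physical address to be granule-aligned:

        /*
         * Illustrative restatement of the block-mapping check, not the
         * kernel's implementation.
         */
        static bool block_mapping_fits(s8 level, u64 addr, u64 end)
        {
                u64 granule = kvm_granule_size(level); /* 1GiB/2MiB/4KiB at levels 1/2/3 with 4K pages */

                if (!kvm_level_supports_block_mapping(level))
                        return false;

                /* The block must lie entirely within the remaining walk range... */
                if (granule > (end - addr))
                        return false;

                /* ...and start on a block boundary. */
                return IS_ALIGNED(addr, granule);
        }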
179 const struct kvm_pgtable_visit_ctx *ctx,
185 WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
186 return walker->cb(ctx, visit);
217 struct kvm_pgtable_visit_ctx ctx = {
231 bool table = kvm_pte_table(ctx.old, level);
233 if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
234 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
238 if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
239 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
249 ctx.old = READ_ONCE(*ptep);
250 table = kvm_pte_table(ctx.old, level);
262 childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
267 if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
268 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
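kvm_pgtable_visitor_cb() is the single funnel through which every walker callback runs, and __kvm_pgtable_visit() decides from the walker's flags whether to invoke it before descending into a table (TABLE_PRE), at a leaf (LEAF), or after a table's children have been visited (TABLE_POST). A minimal sketch of a visitor plugging into this path, assuming the kvm_pgtable_walk() API and struct kvm_pgtable_walker from <asm/kvm_pgtable.h>; the count_valid_* names are made up for illustration:

        #include <asm/kvm_pgtable.h>

        /* Leaf-only visitor: count the valid PTEs in a range. */
        static int count_valid_walker(const struct kvm_pgtable_visit_ctx *ctx,
                                      enum kvm_pgtable_walk_flags visit)
        {
                u64 *nr_valid = ctx->arg;

                /* Only LEAF visits are requested below, so ctx->old is a leaf PTE. */
                if (kvm_pte_valid(ctx->old))
                        (*nr_valid)++;

                return 0;
        }

        static int count_valid_ptes(struct kvm_pgtable *pgt, u64 addr, u64 size,
                                    u64 *nr_valid)
        {
                struct kvm_pgtable_walker walker = {
                        .cb     = count_valid_walker,
                        .arg    = nr_valid,
                        .flags  = KVM_PGTABLE_WALK_LEAF,
                };

                *nr_valid = 0;
                return kvm_pgtable_walk(pgt, addr, size, &walker);
        }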
350 static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
353 struct leaf_walk_data *data = ctx->arg;
355 data->pte = ctx->old;
356 data->level = ctx->level;
444 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
447 u64 phys = data->phys + (ctx->addr - ctx->start);
450 if (!kvm_block_mapping_supported(ctx, phys))
453 new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
454 if (ctx->old == new)
456 if (!kvm_pte_valid(ctx->old))
457 ctx->mm_ops->get_page(ctx->ptep);
458 else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
461 smp_store_release(ctx->ptep, new);
465 static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
469 struct hyp_map_data *data = ctx->arg;
470 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
472 if (hyp_map_walker_try_leaf(ctx, data))
475 if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
483 mm_ops->get_page(ctx->ptep);
484 smp_store_release(ctx->ptep, new);
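When hyp_map_walker_try_leaf() cannot install a mapping at the current level, the walker (in lines not shown here) allocates a child table, takes a reference on the parent slot, and publishes the table entry with smp_store_release() so a lock-free reader that READ_ONCEs the entry sees fully initialised table contents. A hedged sketch of that allocate-and-install step, assuming the zalloc_page() mm_ops hook and the kvm_init_table_pte() helper used elsewhere in this file; illustrative, not the exact source:

        static int hyp_install_table(const struct kvm_pgtable_visit_ctx *ctx)
        {
                kvm_pte_t *childp, new;
                struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

                childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
                if (!childp)
                        return -ENOMEM;

                new = kvm_init_table_pte(childp, mm_ops);
                mm_ops->get_page(ctx->ptep);
                /* Release store pairs with the READ_ONCE() in __kvm_pgtable_visit(). */
                smp_store_release(ctx->ptep, new);
                return 0;
        }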
512 static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
516 u64 granule = kvm_granule_size(ctx->level);
517 u64 *unmapped = ctx->arg;
518 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
520 if (!kvm_pte_valid(ctx->old))
523 if (kvm_pte_table(ctx->old, ctx->level)) {
524 childp = kvm_pte_follow(ctx->old, mm_ops);
529 kvm_clear_pte(ctx->ptep);
531 __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
533 if (ctx->end - ctx->addr < granule)
536 kvm_clear_pte(ctx->ptep);
538 __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
544 mm_ops->put_page(ctx->ptep);
591 static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
594 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
596 if (!kvm_pte_valid(ctx->old))
599 mm_ops->put_page(ctx->ptep);
601 if (kvm_pte_table(ctx->old, ctx->level))
602 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
800 static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
802 if (!kvm_pgtable_walk_shared(ctx)) {
803 WRITE_ONCE(*ctx->ptep, new);
807 return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
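stage2_try_set_pte() picks the store primitive by walk mode: a walker holding the MMU lock exclusively can simply WRITE_ONCE() the new value, whereas a shared (KVM_PGTABLE_WALK_SHARED) walker must cmpxchg() against the value snapshotted into ctx->old, so a racing update makes the store fail instead of being silently overwritten. Callers then return -EAGAIN to retry, as in this hedged, hypothetical walker that sets the stage-2 access flag on a valid leaf (roughly what the mkyoung path does via the attr walker below):

        static int set_af_walker(const struct kvm_pgtable_visit_ctx *ctx,
                                 enum kvm_pgtable_walk_flags visit)
        {
                kvm_pte_t new = ctx->old | KVM_PTE_LEAF_ATTR_LO_S2_AF;

                if (!kvm_pte_valid(ctx->old) || new == ctx->old)
                        return 0;

                /* Lost a race with another shared walker: retry with a fresh ctx->old. */
                if (!stage2_try_set_pte(ctx, new))
                        return -EAGAIN;

                return 0;
        }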
815 * @ctx: context of the visited pte.
824 static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
827 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
829 if (stage2_pte_is_locked(ctx->old)) {
834 WARN_ON(!kvm_pgtable_walk_shared(ctx));
838 if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
841 if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
846 if (kvm_pte_table(ctx->old, ctx->level)) {
847 u64 size = kvm_granule_size(ctx->level);
848 u64 addr = ALIGN_DOWN(ctx->addr, size);
851 } else if (kvm_pte_valid(ctx->old)) {
853 ctx->addr, ctx->level);
857 if (stage2_pte_is_counted(ctx->old))
858 mm_ops->put_page(ctx->ptep);
863 static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
865 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
867 WARN_ON(!stage2_pte_is_locked(*ctx->ptep));
870 mm_ops->get_page(ctx->ptep);
872 smp_store_release(ctx->ptep, new);
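Together, stage2_try_break_pte() and stage2_make_pte() implement break-before-make: the entry is first swapped for KVM_INVALID_PTE_LOCKED (bailing out for a retry if another shared walker already locked it), stale translations are invalidated from the TLBs unless the caller passed KVM_PGTABLE_WALK_SKIP_BBM_TLBI, and only then is the replacement published with a release store, taking a page reference if the new entry is counted. A hedged sketch of how the map path further down strings the two together; error handling and PTE construction are elided:

        static int install_pte_bbm(const struct kvm_pgtable_visit_ctx *ctx,
                                   struct kvm_s2_mmu *mmu, kvm_pte_t new)
        {
                /* Lock the slot and invalidate the old translation, or ask to retry. */
                if (!stage2_try_break_pte(ctx, mmu))
                        return -EAGAIN;

                /* The slot now holds KVM_INVALID_PTE_LOCKED; publish the new entry. */
                stage2_make_pte(ctx, new);
                return 0;
        }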
888 static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
892 struct kvm_pgtable *pgt = ctx->arg;
899 if (kvm_pte_valid(ctx->old)) {
900 kvm_clear_pte(ctx->ptep);
902 if (kvm_pte_table(ctx->old, ctx->level)) {
903 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
906 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
907 ctx->level);
911 mm_ops->put_page(ctx->ptep);
925 static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
942 return phys + (ctx->addr - ctx->start);
945 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
948 u64 phys = stage2_map_walker_phys_addr(ctx, data);
950 if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
953 return kvm_block_mapping_supported(ctx, phys);
956 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
960 u64 phys = stage2_map_walker_phys_addr(ctx, data);
961 u64 granule = kvm_granule_size(ctx->level);
963 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
965 if (!stage2_leaf_mapping_allowed(ctx, data))
969 new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
979 if (!stage2_pte_needs_update(ctx->old, new))
983 if (!kvm_pgtable_walk_shared(ctx) &&
984 !((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW)) {
985 bool old_is_counted = stage2_pte_is_counted(ctx->old);
989 mm_ops->put_page(ctx->ptep);
991 mm_ops->get_page(ctx->ptep);
993 WARN_ON_ONCE(!stage2_try_set_pte(ctx, new));
997 if (!stage2_try_break_pte(ctx, data->mmu))
1001 if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
1006 if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
1010 stage2_make_pte(ctx, new);
1015 static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
1018 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1019 kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
1022 if (!stage2_leaf_mapping_allowed(ctx, data))
1025 ret = stage2_map_walker_try_leaf(ctx, data);
1029 mm_ops->free_unlinked_table(childp, ctx->level);
1033 static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
1036 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1040 ret = stage2_map_walker_try_leaf(ctx, data);
1044 if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
1054 if (!stage2_try_break_pte(ctx, data->mmu)) {
1065 stage2_make_pte(ctx, new);
1079 static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
1082 struct stage2_map_data *data = ctx->arg;
1086 return stage2_map_walk_table_pre(ctx, data);
1088 return stage2_map_walk_leaf(ctx, data);
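stage2_map_walker() is registered for both TABLE_PRE and LEAF visits and dispatches on the visit argument: the pre-order hook collapses an existing table into a block when the whole range can be mapped at this level, while the leaf hook installs or replaces the final entry. A hedged sketch of the registration, following the pattern kvm_pgtable_stage2_map() uses; the map_range name is hypothetical and the stage2_map_data initialisation is elided:

        static int map_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
                             struct stage2_map_data *map_data,
                             enum kvm_pgtable_walk_flags flags)
        {
                struct kvm_pgtable_walker walker = {
                        .cb     = stage2_map_walker,
                        .arg    = map_data,
                        .flags  = flags |               /* e.g. KVM_PGTABLE_WALK_SHARED */
                                  KVM_PGTABLE_WALK_TABLE_PRE |
                                  KVM_PGTABLE_WALK_LEAF,
                };

                return kvm_pgtable_walk(pgt, addr, size, &walker);
        }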
1150 static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
1153 struct kvm_pgtable *pgt = ctx->arg;
1155 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1159 if (!kvm_pte_valid(ctx->old)) {
1160 if (stage2_pte_is_counted(ctx->old)) {
1161 kvm_clear_pte(ctx->ptep);
1162 mm_ops->put_page(ctx->ptep);
1167 if (kvm_pte_table(ctx->old, ctx->level)) {
1168 childp = kvm_pte_follow(ctx->old, mm_ops);
1172 } else if (stage2_pte_cacheable(pgt, ctx->old)) {
1181 stage2_unmap_put_pte(ctx, mmu, mm_ops);
1184 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1185 kvm_granule_size(ctx->level));
1217 static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
1220 kvm_pte_t pte = ctx->old;
1221 struct stage2_attr_data *data = ctx->arg;
1222 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1224 if (!kvm_pte_valid(ctx->old))
1227 data->level = ctx->level;
1243 stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
1245 kvm_granule_size(ctx->level));
1247 if (!stage2_try_set_pte(ctx, pte))
1310 static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
1313 kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
1314 struct stage2_age_data *data = ctx->arg;
1316 if (!kvm_pte_valid(ctx->old) || new == ctx->old)
1327 if (data->mkold && !stage2_try_set_pte(ctx, new))
1382 static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
1385 struct kvm_pgtable *pgt = ctx->arg;
1388 if (!stage2_pte_cacheable(pgt, ctx->old))
1392 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1393 kvm_granule_size(ctx->level));
1485 static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
1488 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1489 struct kvm_mmu_memory_cache *mc = ctx->arg;
1491 kvm_pte_t pte = ctx->old, new, *childp;
1493 s8 level = ctx->level;
1538 if (!stage2_try_break_pte(ctx, mmu)) {
1549 stage2_make_pte(ctx, new);
1603 static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
1606 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1608 if (!stage2_pte_is_counted(ctx->old))
1611 mm_ops->put_page(ctx->ptep);
1613 if (kvm_pte_table(ctx->old, ctx->level))
1614 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
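The free walkers rely on post-order visits: they run with KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST so that a table's page is only released after every entry beneath it has been visited and its references dropped. A hedged sketch of that registration, following the teardown paths in this file; the free_all name and the address-range argument are illustrative:

        static void free_all(struct kvm_pgtable *pgt, u64 ia_size)
        {
                struct kvm_pgtable_walker walker = {
                        .cb     = stage2_free_walker,
                        .flags  = KVM_PGTABLE_WALK_LEAF |
                                  KVM_PGTABLE_WALK_TABLE_POST,
                };

                /* Walk the whole input-address range so every table page is put. */
                WARN_ON(kvm_pgtable_walk(pgt, 0, ia_size, &walker));
        }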