Searched refs:slot (Results 51 - 75 of 956) sorted by last modified time


/linux-master/drivers/gpu/drm/i915/
i915_gpu_error.c 1094 const u64 slot = ggtt->error_capture.start; local
1130 ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
1135 ggtt->vm.insert_page(&ggtt->vm, dma, slot,
1141 s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
1148 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
/linux-master/drivers/gpu/drm/i915/gt/uc/
intel_guc_ads.c 277 struct guc_mmio_reg *slot; local
280 size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
291 regset->storage_max = size / sizeof(*slot);
294 slot = &regset->storage[pos];
296 *slot = *reg;
298 return slot;
310 struct guc_mmio_reg *slot; local
322 slot = __mmio_reg_add(regset, &entry);
323 if (IS_ERR(slot))
324 return PTR_ERR(slot);
[all...]
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_context.c 106 void __rcu **slot; local
110 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
111 struct i915_vma *vma = rcu_dereference_raw(*slot);
133 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
589 u16 slot, width, num_siblings; local
593 if (get_user(slot, &ext->engine_index))
609 if (slot >= set->num_engines) {
611 slot, set->num_engines);
615 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
617 "Invalid placement[%d], already occupied\n", slot);
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_ras.c 2767 void __rcu **slot; local
2771 radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
2772 ecc_err = radix_tree_deref_slot(slot);
2775 radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
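
Both the i915_gem_context.c and amdgpu_ras.c hits above use the same kernel idiom: walk every occupied radix-tree slot, dereference the entry, then delete it through the iterator. A minimal userspace sketch of that iterate-and-delete pattern over a plain pointer array (the radix_tree_* names are the real kernel APIs; the array stand-in below is purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define NSLOTS 8

    /* Stand-in for the objects the kernel trees hold (i915_vma, ecc_err, ...). */
    struct entry { int id; };

    int main(void)
    {
        struct entry *slots[NSLOTS] = { 0 }; /* the "tree": one pointer per slot */
        int i;

        /* Populate a few slots, as radix_tree_insert() would. */
        for (i = 0; i < NSLOTS; i += 3) {
            slots[i] = malloc(sizeof(*slots[i]));
            if (!slots[i])
                return 1;
            slots[i]->id = i;
        }

        /* Analogue of radix_tree_for_each_slot() + deref + iter_delete():
         * visit each occupied slot, use the entry, then clear the slot. */
        for (i = 0; i < NSLOTS; i++) {
            struct entry *e = slots[i]; /* radix_tree_deref_slot() */

            if (!e)
                continue;
            printf("freeing entry %d in slot %d\n", e->id, i);
            free(e);
            slots[i] = NULL;            /* radix_tree_iter_delete() */
        }
        return 0;
    }
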
/linux-master/drivers/cxl/
cxlmem.h 159 C(FWSLOT, -ENXIO, "FW slot is not supported for requested operation"), \
311 u8 slot; member in struct:cxl_mbox_transfer_fw
336 u8 slot; member in struct:cxl_mbox_activate_fw
/linux-master/drivers/cxl/core/
memdev.c 741 * @slot: slot number to activate
743 * Activate firmware in a given slot for the device specified.
749 static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) argument
754 if (slot == 0 || slot > mds->fw.num_slots)
765 activate.slot = slot;
904 transfer->slot = mds->fw.next_slot;
910 transfer->slot
[all...]
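
The memdev.c hits show the CXL firmware-slot convention: slots are numbered from 1, so cxl_mem_activate_fw() rejects slot 0 along with anything beyond num_slots. A small sketch of that validation, with illustrative struct and function names (not the driver's):

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative stand-in for the firmware fields of cxl_memdev_state. */
    struct fw_state { int num_slots; };

    /* Firmware slots are 1-based: slot 0 is invalid, valid range is 1..num_slots. */
    static int activate_fw(const struct fw_state *fw, int slot)
    {
        if (slot == 0 || slot > fw->num_slots)
            return -EINVAL;
        /* ...would send the activate-firmware mailbox command here... */
        return 0;
    }

    int main(void)
    {
        struct fw_state fw = { .num_slots = 4 };

        printf("slot 0 -> %d, slot 3 -> %d, slot 5 -> %d\n",
               activate_fw(&fw, 0), activate_fw(&fw, 3), activate_fw(&fw, 5));
        return 0;
    }
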
/linux-master/drivers/acpi/apei/
ghes.c 931 int i, slot = -1, count; local
942 slot = i;
947 slot = i;
955 slot = i;
960 if (slot != -1) {
966 victim = xchg_release(&ghes_estatus_caches[slot],
971 * from the one based on which we selected the slot. Instead of
972 * going to the loop again to pick another slot, let's just
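
The ghes.c hits scan the estatus cache for a victim slot and publish the replacement with xchg_release(), so the displaced entry is handed back atomically even if another CPU raced in after the scan. A C11-atomics analogue of that publish-and-reclaim step (names illustrative; the scan itself is elided):

    #include <stdatomic.h>
    #include <stdio.h>

    struct cache_entry { int gen; };

    /* One published cache slot; NULL means empty (illustrative). */
    static _Atomic(struct cache_entry *) cache_slot;

    /* Publish @fresh in the slot; whatever was there before is now ours to
     * reclaim, even if a concurrent writer replaced the slot after our scan. */
    static struct cache_entry *install(struct cache_entry *fresh)
    {
        return atomic_exchange_explicit(&cache_slot, fresh, memory_order_release);
    }

    int main(void)
    {
        static struct cache_entry a = { .gen = 1 }, b = { .gen = 2 };

        struct cache_entry *victim = install(&a); /* slot was empty: NULL back */
        printf("first victim: %p\n", (void *)victim);

        victim = install(&b);                     /* evicts a */
        printf("evicted gen %d\n", victim->gen);
        return 0;
    }
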
/linux-master/arch/x86/kvm/
x86.c 367 unsigned slot; local
383 for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
384 values = &msrs->values[slot];
386 wrmsrl(kvm_uret_msrs_list[slot], values->host);
445 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask) argument
451 value = (value & mask) | (msrs->values[slot].host & ~mask);
452 if (value == msrs->values[slot].curr)
454 err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
12715 struct kvm_memory_slot *slot; local
12797 memslot_rmap_free(struct kvm_memory_slot *slot) argument
12807 kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) argument
12821 memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages) argument
12843 kvm_alloc_memslot_metadata(struct kvm *kvm, struct kvm_memory_slot *slot) argument
[all...]
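
kvm_set_user_return_msr() above merges the caller's value into the host value one field at a time: bits covered by mask take the new value, the rest keep the host's, and the WRMSR is skipped when the result is already current. The merge itself is plain C, sketched here outside the kernel:

    #include <inttypes.h>
    #include <stdio.h>

    /* Merge @value into @host under @mask, as kvm_set_user_return_msr() does:
     * masked bits take the new value, unmasked bits keep the host's. */
    static uint64_t merge_msr(uint64_t host, uint64_t value, uint64_t mask)
    {
        return (value & mask) | (host & ~mask);
    }

    int main(void)
    {
        uint64_t host  = 0xffff0000ffff0000ull;
        uint64_t guest = 0x00000000deadbeefull;
        uint64_t mask  = 0x00000000ffffffffull; /* low word is guest-owned */

        uint64_t curr = merge_msr(host, guest, mask);
        printf("merged: 0x%016" PRIx64 "\n", curr);

        /* Redundant-write elision, as in the kernel: skip if already current. */
        if (merge_msr(curr, guest, mask) == curr)
            printf("skipping wrmsr, value unchanged\n");
        return 0;
    }
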
mmu.h 293 __kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages, argument
296 return gfn_to_index(slot->base_gfn + npages - 1,
297 slot->base_gfn, level) + 1;
301 kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level) argument
303 return __kvm_mmu_slot_lpages(slot, slot->npages, level);
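
__kvm_mmu_slot_lpages() counts how many level-sized page-table entries a memslot spans: take the slot-relative index of its last gfn at that level and add one. A runnable sketch of the arithmetic, assuming x86's 512-entry (9 bits per level) page tables and a gfn_to_index() mirroring KVM's helper:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* x86 page tables hold 512 entries per level: 9 gfn bits per step up. */
    #define HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

    /* Mirrors KVM's gfn_to_index(): slot-relative index of @gfn at @level. */
    static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
    }

    /* Mirrors __kvm_mmu_slot_lpages(): entries spanned = last index + 1. */
    static unsigned long slot_lpages(gfn_t base_gfn, unsigned long npages, int level)
    {
        return (unsigned long)(gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1);
    }

    int main(void)
    {
        /* A 1 GiB slot (262144 4 KiB pages) starting at gfn 0x100000. */
        gfn_t base = 0x100000;
        unsigned long npages = 262144;

        printf("4K entries: %lu\n", slot_lpages(base, npages, 1)); /* 262144 */
        printf("2M entries: %lu\n", slot_lpages(base, npages, 2)); /* 512 */
        printf("1G entries: %lu\n", slot_lpages(base, npages, 3)); /* 1 */
        return 0;
    }
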
/linux-master/arch/x86/kvm/vmx/
vmx.c 721 unsigned int slot = msr - vmx->guest_uret_msrs; local
726 ret = kvm_set_user_return_msr(slot, data, msr->mask);
6796 struct kvm_memory_slot *slot; local
6811 * Explicitly grab the memslot using KVM's internal slot ID to ensure
6816 slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
6817 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
6834 pfn = gfn_to_pfn_memslot(slot, gfn);
/linux-master/arch/x86/kvm/svm/
svm.c 772 int slot = direct_access_msr_slot(msr); local
774 if (slot == -ENOENT)
779 set_bit(slot, svm->shadow_msr_intercept.read);
781 clear_bit(slot, svm->shadow_msr_intercept.read);
784 set_bit(slot, svm->shadow_msr_intercept.write);
786 clear_bit(slot, svm->shadow_msr_intercept.write);
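
The svm.c hits maintain per-vCPU shadow bitmaps recording, per MSR slot, whether reads and writes are intercepted. A userspace analogue of that set_bit()/clear_bit() bookkeeping (the bitmap helpers below stand in for the kernel's):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_SLOTS      64
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITMAP_LONGS  ((NR_SLOTS + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Userspace stand-ins for the kernel's set_bit()/clear_bit()/test_bit(). */
    static void set_bit(int nr, unsigned long *map)
    {
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }
    static void clear_bit(int nr, unsigned long *map)
    {
        map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
    }
    static bool test_bit(int nr, const unsigned long *map)
    {
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }

    struct shadow_msr_intercept {
        unsigned long read[BITMAP_LONGS];
        unsigned long write[BITMAP_LONGS];
    };

    /* Record the intercept state for one MSR slot, as the svm.c hits do. */
    static void msr_set_intercept(struct shadow_msr_intercept *s, int slot,
                                  bool intercept_read, bool intercept_write)
    {
        if (intercept_read)
            set_bit(slot, s->read);
        else
            clear_bit(slot, s->read);

        if (intercept_write)
            set_bit(slot, s->write);
        else
            clear_bit(slot, s->write);
    }

    int main(void)
    {
        struct shadow_msr_intercept s = { { 0 }, { 0 } };

        msr_set_intercept(&s, 7, true, false);
        printf("slot 7: read=%d write=%d\n",
               test_bit(7, s.read), test_bit(7, s.write));
        return 0;
    }
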
/linux-master/arch/x86/kvm/mmu/
tdp_mmu.c 1028 if (unlikely(!fault->slot))
1031 wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
1192 __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
1214 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1328 const struct kvm_memory_slot *slot, int min_level)
1335 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1336 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1337 slot->base_gfn + slot->npages, min_level);
1511 const struct kvm_memory_slot *slot,
1327 kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, const struct kvm_memory_slot *slot, int min_level) argument
1510 kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, int target_level, bool shared) argument
1578 kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, const struct kvm_memory_slot *slot) argument
1638 kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot) argument
1649 zap_collapsible_spte_range(struct kvm *kvm, struct kvm_mmu_page *root, const struct kvm_memory_slot *slot) argument
1704 kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot) argument
1755 kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int min_level) argument
[all...]
tdp_mmu.h 36 const struct kvm_memory_slot *slot, int min_level);
38 const struct kvm_memory_slot *slot);
40 struct kvm_memory_slot *slot,
44 const struct kvm_memory_slot *slot);
47 struct kvm_memory_slot *slot, gfn_t gfn,
51 const struct kvm_memory_slot *slot,
spte.h 491 const struct kvm_memory_slot *slot,
paging_tmpl.h 234 * If the slot is read-only, simply do not process the accessed
235 * and dirty bits. This is the correct thing to do if the slot
364 struct kvm_memory_slot *slot; local
395 slot = kvm_vcpu_gfn_to_memslot(vcpu, gpa_to_gfn(real_gpa));
396 if (!kvm_is_visible_memslot(slot))
399 host_addr = gfn_to_hva_memslot_prot(slot, gpa_to_gfn(real_gpa),
536 struct kvm_memory_slot *slot; local
548 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, pte_access & ACC_WRITE_MASK);
549 if (!slot)
552 pfn = gfn_to_pfn_memslot_atomic(slot, gfn);
908 struct kvm_memory_slot *slot; local
[all...]
spte.c 138 const struct kvm_memory_slot *slot,
223 if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
241 if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
244 mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
137 make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte) argument
page_track.c 42 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot) argument
44 vfree(slot->arch.gfn_write_track);
45 slot->arch.gfn_write_track = NULL;
48 static int __kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot, argument
51 const size_t size = sizeof(*slot->arch.gfn_write_track);
53 if (!slot->arch.gfn_write_track)
54 slot->arch.gfn_write_track = __vcalloc(npages, size,
57 return slot->arch.gfn_write_track ? 0 : -ENOMEM;
61 struct kvm_memory_slot *slot,
67 return __kvm_page_track_write_tracking_alloc(slot, npages);
60 kvm_page_track_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) argument
70 kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot) argument
75 update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, short count) argument
90 __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) argument
113 __kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) argument
136 kvm_gfn_is_write_tracked(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn) argument
172 struct kvm_memory_slot *slot; local
293 kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot) argument
321 struct kvm_memory_slot *slot; local
351 struct kvm_memory_slot *slot; local
[all...]
mmu.c 786 const struct kvm_memory_slot *slot, int level)
790 idx = gfn_to_index(gfn, slot->base_gfn, level);
791 return &slot->arch.lpage_info[level - 2][idx];
802 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot, argument
809 linfo = lpage_info_slot(gfn, slot, i);
817 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) argument
819 update_gfn_disallow_lpage_count(slot, gfn, 1);
822 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) argument
824 update_gfn_disallow_lpage_count(slot, gfn, -1);
830 struct kvm_memory_slot *slot; local
785 lpage_info_slot(gfn_t gfn, const struct kvm_memory_slot *slot, int level) argument
887 struct kvm_memory_slot *slot; local
920 struct kvm_memory_slot *slot; local
1095 gfn_to_rmap(gfn_t gfn, int level, const struct kvm_memory_slot *slot) argument
1107 struct kvm_memory_slot *slot; local
1291 __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) argument
1316 kvm_mmu_write_protect_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) argument
1349 kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) argument
1382 kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) argument
1423 kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level) argument
1447 struct kvm_memory_slot *slot; local
1453 __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) argument
1459 kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level) argument
1467 const struct kvm_memory_slot *slot; member in struct:slot_rmap_walk_iterator
1491 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, const struct kvm_memory_slot *slot, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn) argument
1570 kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level) argument
1583 kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level) argument
1597 __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access) argument
1621 rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access) argument
2762 mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch) argument
2863 mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, u64 *sptep, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, struct kvm_page_fault *fault) argument
2939 struct kvm_memory_slot *slot; local
3039 host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, const struct kvm_memory_slot *slot) argument
3103 __kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level, bool is_private) argument
3127 kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level) argument
3139 struct kvm_memory_slot *slot = fault->slot; local
3238 kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn) argument
3718 struct kvm_memory_slot *slot; local
4346 struct kvm_memory_slot *slot = fault->slot; local
6065 __walk_slot_rmaps(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield, bool flush) argument
6094 walk_slot_rmaps(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, int start_level, int end_level, bool flush_on_yield) argument
6105 walk_slot_rmaps_4k(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, bool flush_on_yield) argument
6425 slot_rmap_write_protect(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) argument
6530 shadow_mmu_split_huge_page(struct kvm *kvm, const struct kvm_memory_slot *slot, u64 *huge_sptep) argument
6576 shadow_mmu_try_split_huge_page(struct kvm *kvm, const struct kvm_memory_slot *slot, u64 *huge_sptep) argument
6615 shadow_mmu_try_split_huge_pages(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) argument
6661 kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, int target_level) argument
6730 kvm_mmu_zap_collapsible_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot) argument
6767 kvm_rmap_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot) argument
6779 kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot) argument
6854 kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) argument
7175 struct kvm_memory_slot *slot; local
7346 hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level) argument
7352 hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level) argument
7358 hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level) argument
7364 hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long attrs) argument
7385 struct kvm_memory_slot *slot = range->slot; local
7448 kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm, struct kvm_memory_slot *slot) argument
[all...]
mmu_internal.h 166 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
169 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
170 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
172 struct kvm_memory_slot *slot, u64 gfn,
236 struct kvm_memory_slot *slot; member in struct:kvm_page_fault
318 fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
365 const struct kvm_memory_slot *slot, gfn_t gfn,
/linux-master/arch/x86/include/asm/
kvm_host.h 1922 struct kvm_memory_slot *slot);
2312 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
/linux-master/arch/riscv/kvm/
mmu.c 333 static void gstage_wp_memory_region(struct kvm *kvm, int slot) argument
336 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
394 struct kvm_memory_slot *slot,
398 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
423 struct kvm_memory_slot *slot)
425 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
426 phys_addr_t size = slot->npages << PAGE_SHIFT;
441 * the memory slot is write protected.
393 kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) argument
422 kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) argument
/linux-master/arch/riscv/kernel/probes/
kprobes.c 91 case INSN_GOOD: /* instruction uses slot */
168 unsigned long slot; local
180 slot = (unsigned long)p->ainsn.api.insn;
185 instruction_pointer_set(regs, slot);
/linux-master/arch/powerpc/platforms/pseries/
lpar.c 763 unsigned long slot; local
788 lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
804 pr_devel(" -> slot: %lu\n", slot & 7);
809 return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
821 /* pick a random slot to start at */
923 static long pSeries_lpar_hpte_updatepp(unsigned long slot, argument
942 want_v, slot, flags, psize);
944 lpar_rc = plpar_pte_protect(flags, slot, want_v);
988 long slot; local
1017 unsigned long lpar_rc, slot, vsid, flags; local
1037 pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, int psize, int apsize, int ssize, int local) argument
1147 hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn, int count, int psize, int ssize) argument
1193 hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn, int count, int psize, int ssize) argument
1227 __pSeries_lpar_hugepage_invalidate(unsigned long *slot, unsigned long *vpn, int count, int psize, int ssize) argument
1258 unsigned long shift, hidx, vpn = 0, hash, slot; local
1311 unsigned long slot, vsid; local
1334 unsigned long slot, hash, hidx; local
1354 unsigned long index, shift, slot, current_vpgb, vpgb; local
1533 unsigned long index, shift, slot; local
[all...]
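
The lpar.c insert path (line 809 above) returns a compact token: the low three bits are the slot within the 8-entry HPTE group, and bit 3 records whether the secondary hash bucket was used. A sketch of that encoding; the HPTE_V_SECONDARY bit position below is an assumption for illustration:

    #include <stdio.h>

    /* The kernel's HPTE_V_SECONDARY flag; the bit position here is assumed
     * for illustration. */
    #define HPTE_V_SECONDARY (1UL << 1)

    /* Pack "slot within the 8-entry HPTE group" plus "secondary hash?" into
     * four bits, as the lpar.c insert path does. */
    static long encode_slot(unsigned long slot, unsigned long vflags)
    {
        return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
    }

    int main(void)
    {
        printf("primary, slot 5:   %ld\n", encode_slot(5, 0));                 /* 5 */
        printf("secondary, slot 5: %ld\n", encode_slot(5, HPTE_V_SECONDARY)); /* 13 */
        return 0;
    }
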
/linux-master/arch/powerpc/platforms/cell/spufs/
file.c 1768 lscsa->decr.slot[0] = (u32) val;
1777 return lscsa->decr.slot[0];
1819 lscsa->event_mask.slot[0] = (u32) val;
1828 return lscsa->event_mask.slot[0];
1856 lscsa->srr0.slot[0] = (u32) val;
1865 return lscsa->srr0.slot[0];
2059 info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
/linux-master/arch/powerpc/mm/ptdump/
hashpagetable.c 327 unsigned long slot; local
336 slot = base_hpte_find(ea, psize, true, &v, &r);
339 if (slot == -1)
340 slot = base_hpte_find(ea, psize, false, &v, &r);
343 if (slot == -1)

Completed in 284 milliseconds
