Searched refs:usm (Results 1 - 20 of 20) sorted by relevance

/linux-master/drivers/gpu/drm/xe/
xe_gt_pagefault.c
88 if (vm->usm.last_fault_vma) { /* Fast lookup */
89 if (vma_matches(vm->usm.last_fault_vma, page_addr))
90 vma = vm->usm.last_fault_vma;
146 mutex_lock(&xe->usm.lock);
147 vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
152 mutex_unlock(&xe->usm.lock);
234 vm->usm.last_fault_vma = vma;
344 pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
351 queue_work(gt->usm.pf_wq, &pf_queue->worker);
397 queue_work(gt->usm
[all...]
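The xe_gt_pagefault.c hits above trace the fault-servicing path: a one-entry last_fault_vma cache is tried before the full VMA lookup, the faulting ASID is resolved to a VM through xe->usm.asid_to_vm under xe->usm.lock, and the fault is hashed onto one of NUM_PF_QUEUE work queues by ASID. A minimal, self-contained C model of the cache and the queue hash follows; the types are simplified and NUM_PF_QUEUE's real value lives in xe_gt_types.h (4 here is an assumption):

    #include <stddef.h>
    #include <stdint.h>

    #define NUM_PF_QUEUE 4   /* assumption: real count is in xe_gt_types.h */

    struct vma { uint64_t start, end; };
    struct vm  { struct vma *last_fault_vma; };

    static int vma_matches(const struct vma *vma, uint64_t addr)
    {
        return addr >= vma->start && addr < vma->end;
    }

    /* Fast path: reuse the VMA that faulted last time if it still
     * covers the faulting address. */
    static struct vma *lookup_vma_cached(struct vm *vm, uint64_t addr)
    {
        if (vm->last_fault_vma && vma_matches(vm->last_fault_vma, addr))
            return vm->last_fault_vma;
        return NULL; /* fall back to the full lookup */
    }

    /* Faults for the same ASID always land on the same queue, which
     * keeps per-VM fault handling ordered while spreading VMs across
     * the pf_wq workers. */
    static unsigned int pf_queue_index(uint32_t asid)
    {
        return asid % NUM_PF_QUEUE;
    }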
xe_bb.h
17 struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);
xe_bb.c
32 struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm) argument
47 bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,
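xe_bb.c:47 is the one place the usm flag changes batch-buffer allocation: USM work draws from the GT's dedicated gt->usm.bb_pool instead of the tile's kernel_bb_pool. A hypothetical, simplified model of that selection (the struct names here are invented for illustration):

    #include <stdbool.h>

    struct pool;
    struct xe_tile_sketch { struct pool *kernel_bb_pool; };
    struct xe_gt_sketch   { struct xe_tile_sketch *tile; struct pool *usm_bb_pool; };

    /* Mirrors the ternary in xe_bb_new(): USM batch buffers come from
     * a reserved per-GT pool so they do not contend with ordinary
     * kernel batch buffers. */
    static struct pool *bb_pool_for(struct xe_gt_sketch *gt, bool usm)
    {
        return usm ? gt->usm_bb_pool : gt->tile->kernel_bb_pool;
    }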
xe_gt.h
71 hwe->instance == gt->usm.reserved_bcs_instance;
xe_device.h
149 return xe->usm.num_vm_in_fault_mode != 0;
154 return xe->usm.num_vm_in_non_fault_mode != 0;
xe_migrate.c
52 /** @usm_batch_base_ofs: VM offset of the usm batch buffer */
197 batch = tile->primary_gt->usm.bb_pool->bo;
218 batch = tile->primary_gt->usm.bb_pool->bo;
364 primary_gt->usm.reserved_bcs_instance,
625 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm) argument
627 return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
746 bool usm = xe->info.has_usm; local
778 bb = xe_bb_new(gt, batch_size, usm);
813 xe_migrate_batch_base(m, usm),
1003 bool usm local
1290 bool usm = !q && xe->info.has_usm; local
[all...]
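The xe_migrate.c hits show the same flag threading through migration: xe_migrate_batch_base() keeps two precomputed VM offsets and returns the USM one when asked (with usm derived from xe->info.has_usm at the call sites). A direct sketch of that selector, with simplified types:

    #include <stdbool.h>
    #include <stdint.h>

    struct xe_migrate_sketch {
        uint64_t batch_base_ofs;      /* normal batch buffer VM offset */
        uint64_t usm_batch_base_ofs;  /* USM batch buffer VM offset */
    };

    /* Same shape as xe_migrate_batch_base() in the hits above. */
    static uint64_t migrate_batch_base(const struct xe_migrate_sketch *m,
                                       bool usm)
    {
        return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
    }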
xe_gt_types.h
192 /** @usm: unified shared memory state */
195 * @usm.bb_pool: Pool from which batchbuffers, for USM operations
203 * @usm.reserved_bcs_instance: reserved BCS instance used for USM
207 /** @usm.pf_wq: page fault work queue, unbound, high priority */
209 /** @usm.acc_wq: access counter work queue, unbound, high priority */
212 * @usm.pf_queue: Page fault queue used to sync faults so faults can
219 /** @usm.pf_queue.gt: back pointer to GT */
222 /** @usm.pf_queue.data: data in the page fault queue */
225 * @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
230 * @usm
267 } usm; member in struct:xe_gt
[all...]
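Pieced together from the kerneldoc fragments above, the per-GT USM state looks roughly like the sketch below. The field types and the array bound are assumptions, and the search output truncates at "[all...]"; the real definition is in xe_gt_types.h.

    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct xe_sa_manager;
    struct xe_gt;

    struct xe_gt_usm_sketch {
        struct xe_sa_manager *bb_pool;     /* batchbuffers for USM operations */
        u16 reserved_bcs_instance;         /* BCS instance reserved for USM */
        struct workqueue_struct *pf_wq;    /* page faults: unbound, high prio */
        struct workqueue_struct *acc_wq;   /* access counters: unbound, high prio */
        struct pf_queue_sketch {
            struct xe_gt *gt;              /* back pointer to the GT */
            u32 *data;                     /* queued fault messages */
            u32 tail;                      /* tail pointer in DWs */
            /* further fields truncated in the search output */
        } pf_queue[4];                     /* assumption: NUM_PF_QUEUE entries */
    };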
xe_device.c
243 drmm_mutex_init(&xe->drm, &xe->usm.lock);
244 xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
251 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
253 &xe->usm.next_asid, GFP_KERNEL);
256 xa_erase(&xe->usm.asid_to_vm, asid);
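xe_device.c shows the ASID lifecycle in four lines: the map is created with XA_FLAGS_ALLOC, entries are handed out with xa_alloc_cyclic() so freed ASIDs are not immediately recycled (next_asid is the rotating cursor), and xa_erase() drops them on teardown. A minimal kernel-style sketch of the allocation step; XE_MAX_ASID_SKETCH is an assumed limit, not the driver's value:

    #include <linux/xarray.h>

    #define XE_MAX_ASID_SKETCH (1u << 20)   /* assumption */

    static DEFINE_XARRAY_ALLOC(asid_to_vm_sketch);
    static u32 next_asid_sketch;

    static int asid_alloc_sketch(void *vm, u32 *asid)
    {
        /* xa_alloc_cyclic() returns 0 on success, 1 if the cursor
         * wrapped, or -errno on failure; any non-negative result
         * leaves *asid valid. */
        int err = xa_alloc_cyclic(&asid_to_vm_sketch, asid, vm,
                                  XA_LIMIT(1, XE_MAX_ASID_SKETCH - 1),
                                  &next_asid_sketch, GFP_KERNEL);

        return err < 0 ? err : 0;
    }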
xe_vm_types.h
256 } usm; member in struct:xe_vm
xe_gt.c
440 gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
442 if (IS_ERR(gt->usm.bb_pool)) {
443 err = PTR_ERR(gt->usm.bb_pool);
xe_vm.c
1123 if (vm->usm.last_fault_vma == vma)
1124 vm->usm.last_fault_vma = NULL;
1457 mutex_lock(&xe->usm.lock);
1459 xe->usm.num_vm_in_fault_mode++;
1461 xe->usm.num_vm_in_non_fault_mode++;
1462 mutex_unlock(&xe->usm.lock);
1575 mutex_lock(&xe->usm.lock);
1577 xe->usm.num_vm_in_fault_mode--;
1579 xe->usm.num_vm_in_non_fault_mode--;
1581 if (vm->usm
[all...]
xe_device_types.h
337 /** @usm: unified memory state */
339 /** @usm.asid: convert an ASID to a VM */
341 /** @usm.next_asid: next ASID, used to cyclically allocate ASIDs */
343 /** @usm.num_vm_in_fault_mode: number of VMs in fault mode */
345 /** @usm.num_vm_in_non_fault_mode: number of VMs in non-fault mode */
347 /** @usm.lock: protects USM state */
349 } usm; member in struct:xe_device
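Taken together with the xe_vm.c hits (VM creation increments one counter under the lock, teardown decrements it), the device-level state is a mutex-protected pair of counters that xe_device.h reduces to boolean queries. A condensed kernel-style model with illustrative names:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct usm_state_sketch {
        struct mutex lock;            /* protects both counters */
        int num_vm_in_fault_mode;
        int num_vm_in_non_fault_mode;
    };

    static void usm_vm_created(struct usm_state_sketch *usm, bool fault_mode)
    {
        mutex_lock(&usm->lock);
        if (fault_mode)
            usm->num_vm_in_fault_mode++;
        else
            usm->num_vm_in_non_fault_mode++;
        mutex_unlock(&usm->lock);
    }

    /* Mirrors xe_device.h:149: "is any VM in fault mode right now?" */
    static bool usm_in_fault_mode(struct usm_state_sketch *usm)
    {
        return usm->num_vm_in_fault_mode != 0;
    }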
xe_trace.h
400 __entry->asid = xe_vma_vm(vma)->usm.asid;
497 __entry->asid = vm->usm.asid;
xe_gt_tlb_invalidation.c
300 action[len++] = xe_vma_vm(vma)->usm.asid;
xe_hw_engine.c
517 gt->usm.reserved_bcs_instance = hwe->instance;
926 hwe->instance == gt->usm.reserved_bcs_instance;
xe_lrc.c
776 xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
/linux-master/arch/powerpc/mm/
drmem.c
219 __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data, argument
229 ret = func(&lmb, &usm, data);
253 __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data, argument
275 ret = func(&lmb, &usm, data);
288 const __be32 *prop, *usm; local
301 usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);
305 ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
310 ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
320 update_lmb(struct drmem_lmb *updated_lmb, __maybe_unused const __be32 **usm, __maybe_unused void *data) argument
397 const __be32 *prop, *usm; local
[all...]
numa.c
835 static inline int __init read_usm_ranges(const __be32 **usm) argument
843 return read_n_cells(n_mem_size_cells, usm);
850 numa_setup_drmem_lmb(struct drmem_lmb *lmb, const __be32 **usm, void *data) argument
866 if (*usm)
874 ranges = read_usm_ranges(usm);
881 base = read_n_cells(n_mem_addr_cells, usm);
882 size = read_n_cells(n_mem_size_cells, usm);
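All the powerpc call sites (drmem.c, numa.c, prom.c, plus the kexec one below) consume the linux,drconf-usable-memory property the same way: usm is a cursor over big-endian 32-bit cells, and each read advances it. Per entry that means a range count followed by (base, size) pairs. A self-contained userspace model of the walk; it assumes the cells are already in host byte order, whereas the kernel applies be32_to_cpu() per cell:

    #include <stdint.h>

    /* Read n 32-bit cells into one value and advance the cursor, the
     * shape of read_n_cells()/dt_mem_next_cell() in the hits above. */
    static uint64_t read_n_cells_sketch(int n, const uint32_t **cellp)
    {
        uint64_t r = 0;

        while (n--)
            r = (r << 32) | *(*cellp)++;
        return r;
    }

    /* One usable-memory entry: a range count, then that many
     * (base, size) pairs, as numa_setup_drmem_lmb() walks them. */
    static void walk_usable_entry(const uint32_t **usm,
                                  int addr_cells, int size_cells)
    {
        uint64_t ranges = read_n_cells_sketch(size_cells, usm);

        while (ranges--) {
            uint64_t base = read_n_cells_sketch(addr_cells, usm);
            uint64_t size = read_n_cells_sketch(size_cells, usm);

            (void)base;
            (void)size;   /* e.g. hand off to memblock_add() */
        }
    }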
/linux-master/arch/powerpc/kernel/
prom.c
521 early_init_drmem_lmb(struct drmem_lmb *lmb, const __be32 **usm, void *data) argument
540 if (*usm)
551 rngs = dt_mem_next_cell(dt_root_size_cells, usm);
558 base = dt_mem_next_cell(dt_root_addr_cells, usm);
559 size = dt_mem_next_cell(dt_root_size_cells, usm);
/linux-master/arch/powerpc/kexec/
file_load_64.c
513 * @usm: linux,drconf-usable-memory property value.
518 static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm, argument
529 if (*usm) {

Completed in 274 milliseconds