Searched refs:managed (Results 1 - 25 of 36) sorted by relevance

/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmmgp10b.c
45 gp10b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
49 return gp100_vmm_new_(&gp10b_vmm, mmu, managed, addr, size,
vmmmcp77.c
39 mcp77_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
43 return nv04_vmm_new_(&mcp77_vmm, mmu, 0, managed, addr, size,
vmmgm20b.c
57 gm20b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
61 return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
66 gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
70 return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
vmmgk20a.c
67 gk20a_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
71 return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, managed, addr,
vmmgm200.c
144 struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
168 return nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
172 gm200_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
176 return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
181 gm200_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
185 return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
142 gm200_vmm_new_(const struct nvkm_vmm_func *func_16, const struct nvkm_vmm_func *func_17, struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, void *argv, u32 argc, struct lock_class_key *key, const char *name, struct nvkm_vmm **pvmm) argument
vmmgk104.c
98 gk104_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
102 return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, managed, addr,
vmmtu102.c
73 tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
77 return gp100_vmm_new_(&tu102_vmm, mmu, managed, addr, size,
vmmnv04.c
103 u32 pd_header, bool managed, u64 addr, u64 size,
112 ret = nvkm_vmm_new_(func, mmu, pd_header, managed, addr, size,
121 nv04_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
129 ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, managed, addr, size,
102 nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu, u32 pd_header, bool managed, u64 addr, u64 size, void *argv, u32 argc, struct lock_class_key *key, const char *name, struct nvkm_vmm **pvmm) argument
vmmgv100.c
83 gv100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
87 return gp100_vmm_new_(&gv100_vmm, mmu, managed, addr, size,
vmmnv41.c
106 nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
110 return nv04_vmm_new_(&nv41_vmm, mmu, 0, managed, addr, size,
uvmm.c
61 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
94 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
122 if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
171 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
469 if (!uvmm->vmm->managed.raw)
544 bool managed, raw; local
547 managed = args->v0.type == NVIF_VMM_V0_TYPE_MANAGED;
561 ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
573 uvmm->vmm->managed.raw = raw;
priv.h
35 int (*ctor)(struct nvkm_mmu *, bool managed, u64 addr, u64 size,
vmmgf100.c
404 struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
409 case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
411 case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
420 gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
424 return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
402 gf100_vmm_new_(const struct nvkm_vmm_func *func_16, const struct nvkm_vmm_func *func_17, struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, void *argv, u32 argc, struct lock_class_key *key, const char *name, struct nvkm_vmm **pvmm) argument
vmmnv44.c
208 nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
216 ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, managed, addr, size,
vmm.h
163 u32 pd_header, bool managed, u64 addr, u64 size,
186 u64 p_start = vmm->managed.p.addr;
187 u64 p_end = p_start + vmm->managed.p.size;
188 u64 n_start = vmm->managed.n.addr;
189 u64 n_end = n_start + vmm->managed.n.size;
vmmgp100.c
601 struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
620 ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
629 gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, argument
633 return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
600 gp100_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, void *argv, u32 argc, struct lock_class_key *key, const char *name, struct nvkm_vmm **pvmm) argument
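Note: every nouveau match above follows one shape: a thin per-generation constructor (gp10b_vmm_new, gk104_vmm_new, ...) forwards its managed flag, plus the addr/size of the managed VA range, to a shared backend (nvkm_vmm_new_, nv04_vmm_new_, gf100_vmm_new_, gp100_vmm_new_). A minimal sketch of that dispatch pattern, with a hypothetical "xx50" generation standing in for a real chipset:

    /* Sketch only: mirrors the constructor pattern in the matches above.
     * "xx50" is hypothetical; real signatures live in nvkm/subdev/mmu/vmm.h. */
    static const struct nvkm_vmm_func xx50_vmm = {
            /* per-generation page-table layout and map/unmap hooks */
    };

    static int
    xx50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
                 void *argv, u32 argc, struct lock_class_key *key,
                 const char *name, struct nvkm_vmm **pvmm)
    {
            /* pd_header == 0; "managed" marks [addr, addr + size) as a
             * client-managed range instead of kernel-allocated VA space. */
            return nvkm_vmm_new_(&xx50_vmm, mmu, 0, managed, addr, size,
                                 key, name, pvmm);
    }
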
/linux-master/drivers/gpu/drm/
drm_managed.c
21 * DOC: managed resources
23 * Inspired by struct &device managed resources, but tied to the lifetime of
33 * Note that release actions and managed memory can also be added and removed
35 * safe. But it is recommended to use managed resources only for resources that
69 list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
121 spin_lock_irqsave(&dev->managed.lock, flags);
122 list_add(&dr->node.entry, &dev->managed.resources);
123 spin_unlock_irqrestore(&dev->managed.lock, flags);
131 WARN_ON(dev->managed.final_kfree);
134 dev->managed
[all...]
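Note: the drm_managed.c hits are the DRM "managed resources" core: memory and release actions tied to the lifetime of a struct drm_device, tracked on dev->managed.resources under dev->managed.lock. A hedged usage sketch (the driver init function and release action are illustrative):

    #include <drm/drm_drv.h>
    #include <drm/drm_managed.h>

    static void my_hw_quiesce(struct drm_device *drm, void *unused)
    {
            /* runs from the managed-release path when the device goes away */
    }

    static int my_driver_init(struct drm_device *drm)
    {
            /* drmm_kzalloc() memory is freed with the last drm_device ref */
            u32 *scratch = drmm_kzalloc(drm, PAGE_SIZE, GFP_KERNEL);

            if (!scratch)
                    return -ENOMEM;
            /* register a managed release action instead of a manual unwind */
            return drmm_add_action_or_reset(drm, my_hw_quiesce, NULL);
    }
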
/linux-master/include/uapi/linux/
icmpv6.h
49 managed:1; member in struct:icmp6hdr::__anon1961::icmpv6_nd_ra
52 __u8 managed:1, member in struct:icmp6hdr::__anon1961::icmpv6_nd_ra
77 #define icmp6_addrconf_managed icmp6_dataun.u_nd_ra.managed
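Note: here managed is the M ("Managed address configuration") bit of an ICMPv6 Router Advertisement, declared once per bit-field endianness and reached through the icmp6_addrconf_managed accessor. A small sketch of testing it on a validated RA header:

    #include <linux/icmpv6.h>

    /* Sketch: does this RA ask the host to use DHCPv6 for addresses?
     * hdr is assumed to point at a validated router advertisement. */
    static bool ra_wants_dhcpv6(const struct icmp6hdr *hdr)
    {
            return hdr->icmp6_addrconf_managed;   /* the M flag, RFC 4861 */
    }
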
/linux-master/kernel/irq/
matrix.c
14 unsigned int managed; member in struct:cpumap
92 cm->available -= cm->managed + m->systembits_inalloc;
117 unsigned int num, bool managed)
127 if (managed)
155 /* Find the best CPU which has the lowest number of managed IRQs allocated */
208 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
227 cm->managed++;
246 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
252 * This removes not allocated managed interrupts from the map. It does
253 * not matter which one because the managed interrupt
116 matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm, unsigned int num, bool managed) argument
424 irq_matrix_free(struct irq_matrix *m, unsigned int cpu, unsigned int bit, bool managed) argument
[all...]
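Note: the matrix keeps a per-CPU managed count so reserved but not-yet-allocated managed interrupts are subtracted from "available". A sketch of the usual lifecycle against this API (reserve at setup, allocate on activation, free with managed == true), assuming the kernel's exported irq_matrix helpers:

    #include <linux/irq.h>
    #include <linux/cpumask.h>

    /* m is the system matrix (e.g. x86's vector_matrix); msk is the set
     * of CPUs the managed interrupt may be placed on. */
    static int managed_irq_lifecycle(struct irq_matrix *m,
                                     const struct cpumask *msk)
    {
            unsigned int cpu;
            int bit, ret;

            ret = irq_matrix_reserve_managed(m, msk);    /* cm->managed++ */
            if (ret)
                    return ret;

            bit = irq_matrix_alloc_managed(m, msk, &cpu);
            if (bit < 0) {
                    irq_matrix_remove_managed(m, msk);
                    return bit;
            }

            irq_matrix_free(m, cpu, bit, true);          /* managed == true */
            irq_matrix_remove_managed(m, msk);
            return 0;
    }
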
/linux-master/drivers/net/mdio/
of_mdio.c
394 const char *managed; local
403 err = of_property_read_string(np, "managed", &managed);
404 if (err == 0 && strcmp(managed, "auto") != 0)
421 const char *managed; local
423 if (of_property_read_string(np, "managed", &managed) == 0 &&
424 strcmp(managed, "in-band-status") == 0) {
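Note: of_mdio.c reads the standard "managed" device-tree property to choose between MAC-driven ("auto") and in-band ("in-band-status") link management. The same check, sketched for a given device node:

    #include <linux/of.h>
    #include <linux/string.h>

    /* Sketch: true if the DT node requests in-band link-status handling,
     * mirroring the of_mdio.c check above. */
    static bool dt_wants_inband_status(struct device_node *np)
    {
            const char *managed;

            return of_property_read_string(np, "managed", &managed) == 0 &&
                   strcmp(managed, "in-band-status") == 0;
    }
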
/linux-master/include/trace/events/
irq_matrix.h
78 __field( unsigned int, managed )
91 __entry->managed = cmap->managed;
98 TP_printk("bit=%d cpu=%u online=%d avl=%u alloc=%u managed=%u online_maps=%u global_avl=%u, global_rsvd=%u, total_alloc=%u",
101 __entry->managed, __entry->online_maps,
/linux-master/include/drm/
drm_device.h
60 * @managed:
66 /** @managed.resources: managed resources list */
68 /** @managed.final_kfree: pointer for final kfree() call */
70 /** @managed.lock: protects @managed.resources */
72 } managed; member in struct:drm_device
/linux-master/sound/core/
pcm_memory.c
261 size_t size, size_t max, bool managed)
290 if (managed)
297 bool managed)
303 err = preallocate_pages(substream, type, data, size, max, managed);
355 * Do pre-allocation for the given DMA buffer type, and set the managed
259 preallocate_pages(struct snd_pcm_substream *substream, int type, struct device *data, size_t size, size_t max, bool managed) argument
295 preallocate_pages_for_all(struct snd_pcm *pcm, int type, void *data, size_t size, size_t max, bool managed) argument
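Note: in pcm_memory.c the managed flag decides whether the preallocated DMA buffer is also wired up and torn down automatically around hw_params/hw_free; drivers normally reach this via snd_pcm_set_managed_buffer_all(). A usage sketch (the device and sizes are illustrative):

    #include <sound/pcm.h>
    #include <sound/memalloc.h>

    /* Sketch: preallocate 64 KiB device-coherent buffers for all substreams
     * and let the PCM core manage buffer setup in hw_params/hw_free. */
    static void my_pcm_setup_buffers(struct snd_pcm *pcm, struct device *dev)
    {
            snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, dev,
                                           64 * 1024, 64 * 1024);
    }
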
/linux-master/arch/x86/kernel/apic/
vector.c
147 bool managed = irqd_affinity_is_managed(irqd); local
178 managed);
348 bool managed = irqd_affinity_is_managed(irqd); local
360 irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
369 irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
742 * and movable in the cpu hotplug check and it prevents managed
939 bool managed = apicd->is_managed; local
951 trace_vector_free_moved(apicd->irq, cpu, vector, managed);
952 irq_matrix_free(vector_matrix, cpu, vector, managed);
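Note: on x86 the flag originates from the irq descriptor via irqd_affinity_is_managed() and is threaded through to irq_matrix_free(), so managed vectors return to the reserved pool rather than the general one. A tiny sketch of that hand-off:

    #include <linux/irq.h>

    /* Sketch: free a vector back to the matrix while preserving its
     * managed state, as vector.c does; the arguments are assumed to
     * come from the surrounding allocation code. */
    static void free_cpu_vector(struct irq_matrix *vector_matrix,
                                struct irq_data *irqd,
                                unsigned int cpu, unsigned int vector)
    {
            bool managed = irqd_affinity_is_managed(irqd);

            irq_matrix_free(vector_matrix, cpu, vector, managed);
    }
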
/linux-master/drivers/gpu/drm/nouveau/include/nvkm/subdev/
mmu.h
51 } managed; member in struct:nvkm_vmm
