Lines matching refs:mm. These are the call sites that touch the per-object mm (memory-management) state and the device-wide i915->mm bookkeeping in the i915 GEM code; the number before each line is its position in the source file.

26 #include <linux/sched/mm.h>
107 INIT_LIST_HEAD(&obj->mm.link);
125 obj->mm.madv = I915_MADV_WILLNEED;
126 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
127 mutex_init(&obj->mm.get_page.lock);
128 INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
129 mutex_init(&obj->mm.get_dma_page.lock);
143 mutex_destroy(&obj->mm.get_page.lock);
144 mutex_destroy(&obj->mm.get_dma_page.lock);
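
The init lines at 126-129 and the teardown at 143-144 pair two per-object lookup caches, each a radix tree guarded by a mutex. A minimal sketch of that pairing, using a hypothetical page_lookup_cache type rather than the driver's real structures:

#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>

struct page_lookup_cache {
        struct radix_tree_root radix;   /* page index -> cached lookup state */
        struct mutex lock;              /* serialises walkers refilling the cache */
};

static void page_lookup_cache_init(struct page_lookup_cache *cache)
{
        /*
         * __GFP_NOWARN: inserting into the cache is best-effort; a failed
         * node allocation only costs a slower lookup later, so a warning
         * in the log would be noise.
         */
        INIT_RADIX_TREE(&cache->radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&cache->lock);
}

static void page_lookup_cache_fini(struct page_lookup_cache *cache)
{
        /* Entries are expected to be gone by now; only the lock remains. */
        mutex_destroy(&cache->lock);
}
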
301 if (obj->mm.n_placements > 1)
302 kfree(obj->mm.placements);
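
The guard at line 301 encodes an ownership rule: kfree() is only valid when more than one placement was stored, which implies the single-placement case borrows a pointer into storage owned elsewhere (in i915, the device-wide region table) instead of allocating an array. A hedged sketch of that convention, with hypothetical names:

#include <linux/slab.h>

struct intel_memory_region;     /* the driver's region type, opaque here */

struct my_obj {
        struct {
                struct intel_memory_region **placements;
                unsigned int n_placements;
        } mm;
};

static void object_release_placements(struct my_obj *obj)
{
        /*
         * A single placement aliases device-owned storage, so freeing it
         * here would be a double free; only a multi-entry array is owned
         * by the object itself.
         */
        if (obj->mm.n_placements > 1)
                kfree(obj->mm.placements);

        obj->mm.placements = NULL;
        obj->mm.n_placements = 0;
}
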
306 GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
307 atomic_dec(&i915->mm.free_count);
337 * This function cleans up usage of the object mm.pages member. It
365 atomic_set(&obj->mm.pages_pin_count, 0);
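
Line 365 force-resets mm.pages_pin_count during page teardown. Pin counts normally move in balanced pin/unpin pairs; on the final teardown path the object is unreachable, so any residual pins are cleared rather than unwound. A small sketch of that discipline, using hypothetical helpers:

#include <linux/atomic.h>

struct my_obj {
        struct {
                atomic_t pages_pin_count;
        } mm;
};

static void obj_pages_pin(struct my_obj *obj)
{
        atomic_inc(&obj->mm.pages_pin_count);   /* pages must stay resident */
}

static void obj_pages_unpin(struct my_obj *obj)
{
        atomic_dec(&obj->mm.pages_pin_count);
}

static void obj_pages_fini(struct my_obj *obj)
{
        /* Teardown: nobody can take new pins, so drop any stale ones. */
        atomic_set(&obj->mm.pages_pin_count, 0);
}
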
429 struct llist_node *freed = llist_del_all(&i915->mm.free_list);
438 container_of(work, struct drm_i915_private, mm.free_work);
458 atomic_inc(&i915->mm.free_count);
471 if (llist_add(&obj->freed, &i915->mm.free_list))
472 queue_work(i915->wq, &i915->mm.free_work);
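
Lines 429-472, together with the free_count accounting at 306-307 and 458, form a deferred-free pipeline: a lock-free llist collects dying objects from any context, and a worker drains them where sleeping is allowed. A self-contained sketch of the same shape, with hypothetical my_device/my_object types and none of the driver's extra checks (e.g. the underflow GEM_BUG_ON at line 306):

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_device {
        struct llist_head free_list;    /* lock-free stack of dead objects */
        struct work_struct free_work;   /* drains free_list in process context */
        atomic_t free_count;            /* queued-but-not-yet-freed objects */
        struct workqueue_struct *wq;
};

struct my_object {
        struct my_device *dev;
        struct llist_node freed;
};

static void my_free_work(struct work_struct *work)
{
        struct my_device *dev = container_of(work, struct my_device, free_work);
        struct llist_node *freed = llist_del_all(&dev->free_list);
        struct my_object *obj, *next;

        /* Everything queued so far is freed here; later arrivals requeue. */
        llist_for_each_entry_safe(obj, next, freed, freed) {
                atomic_dec(&dev->free_count);
                kfree(obj);
        }
}

static void my_device_init_free_path(struct my_device *dev)
{
        init_llist_head(&dev->free_list);
        INIT_WORK(&dev->free_work, my_free_work);       /* cf. line 878 */
        atomic_set(&dev->free_count, 0);
}

static void my_object_free(struct my_object *obj)
{
        struct my_device *dev = obj->dev;

        atomic_inc(&dev->free_count);   /* cf. line 458 */
        /*
         * llist_add() returns true only when the list was previously
         * empty, so the work is queued once per burst of frees rather
         * than once per object.
         */
        if (llist_add(&obj->freed, &dev->free_list))
                queue_work(dev->wq, &dev->free_work);
}
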
522 src_map = io_mapping_map_wc(&obj->mm.region->iomap,
523 dma - obj->mm.region->region.start,
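
Lines 522-523 read from a device-memory region through its io_mapping: the DMA address is rebased to an offset inside the region, mapped write-combined, and copied out. A sketch under the assumption that the region exposes an io_mapping plus its backing resource, as i915's intel_memory_region does; my_region is a stand-in:

#include <linux/io.h>
#include <linux/io-mapping.h>
#include <linux/ioport.h>
#include <linux/types.h>

struct my_region {
        struct io_mapping iomap;        /* WC window over device memory */
        struct resource region;         /* physical extent; .start used below */
};

static void read_from_region(struct my_region *mem, dma_addr_t dma,
                             void *dst, size_t len)
{
        void __iomem *src;

        /* Rebase the DMA address to an offset inside the mapped window. */
        src = io_mapping_map_wc(&mem->iomap, dma - mem->region.start, len);
        memcpy_fromio(dst, src, len);
        io_mapping_unmap(src);
}
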
590 int pin_count = atomic_read(&obj->mm.pages_pin_count);
620 struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
625 return obj->mm.n_placements > 1;
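
Lines 620 and 625 belong to one predicate: an object can migrate only if it still has a backing region and was created with more than one allowed placement. Sketched with a hypothetical my_obj:

#include <linux/compiler.h>
#include <linux/types.h>

struct intel_memory_region;

struct my_obj {
        struct {
                struct intel_memory_region *region;     /* current backing */
                unsigned int n_placements;              /* allowed placements */
        } mm;
};

static bool obj_migratable(struct my_obj *obj)
{
        /* READ_ONCE: a concurrent migration can retarget the region. */
        struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

        if (!mr)
                return false;

        return obj->mm.n_placements > 1;
}
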
687 unsigned int num_allowed = obj->mm.n_placements;
692 GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
694 mr = i915->mm.regions[id];
701 if (obj->mm.region == mr)
717 if (mr == obj->mm.placements[i])
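
Lines 687-717 validate a migration target: it is acceptable if it is already the current region or appears in the placement list fixed at creation. A sketch of that membership test, again with a hypothetical my_obj:

#include <linux/types.h>

struct intel_memory_region;

struct my_obj {
        struct {
                struct intel_memory_region *region;
                struct intel_memory_region **placements;
                unsigned int n_placements;
        } mm;
};

static bool obj_can_migrate_to(struct my_obj *obj,
                               struct intel_memory_region *mr)
{
        unsigned int i;

        if (obj->mm.region == mr)
                return true;            /* already resident there */

        for (i = 0; i < obj->mm.n_placements; i++) {
                if (obj->mm.placements[i] == mr)
                        return true;    /* allowed, fixed at creation */
        }

        return false;
}
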
792 GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
795 mr = i915->mm.regions[id];
802 if (GEM_WARN_ON(obj->mm.region != mr))
823 if (!obj->mm.n_placements) {
836 for (i = 0; i < obj->mm.n_placements; i++) {
837 if (obj->mm.placements[i]->type == type)
864 for (i = 0; i < obj->mm.n_placements; i++) {
866 if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
869 obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
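
The loops at 836-837 and 864-869 both classify an object by the memory types in its placement list (the driver's INTEL_MEMORY_SYSTEM and INTEL_MEMORY_LOCAL). The common shape, sketched with stand-in types since the real enum lives in the driver's headers:

#include <linux/types.h>

enum mem_type {                 /* stand-in for enum intel_memory_type */
        MEM_SYSTEM,
        MEM_LOCAL,
};

struct mem_region {
        enum mem_type type;
};

struct my_obj {
        struct {
                struct mem_region **placements;
                unsigned int n_placements;
        } mm;
};

static bool obj_has_placement_type(struct my_obj *obj, enum mem_type type)
{
        unsigned int i;

        for (i = 0; i < obj->mm.n_placements; i++) {
                if (obj->mm.placements[i]->type == type)
                        return true;
        }

        return false;
}
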
878 INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
964 return obj->mm.unknown_state;