Lines Matching defs:glob

40 struct ttm_mem_global *glob;
67 mtx_lock(&zone->glob->lock);
78 mtx_unlock(&zone->glob->lock);
85 static void ttm_check_swapping(struct ttm_mem_global *glob);
105 mtx_lock(&zone->glob->lock);
118 mtx_unlock(&zone->glob->lock);
120 ttm_check_swapping(zone->glob);
126 static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
130 static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
137 for (i = 0; i < glob->num_zones; ++i) {
138 zone = glob->zones[i];
162 static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
168 mtx_lock(&glob->lock);
169 if (glob->shrink == NULL)
172 while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
173 shrink = glob->shrink;
174 mtx_unlock(&glob->lock);
176 mtx_lock(&glob->lock);
181 mtx_unlock(&glob->lock);
188 struct ttm_mem_global *glob = arg;
190 ttm_shrink(glob, true, 0ULL);
193 static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
206 zone->glob = glob;
207 glob->zone_kernel = zone;
209 glob->zones[glob->num_zones++] = zone;
213 static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
242 zone->glob = glob;
243 glob->zone_dma32 = zone;
245 glob->zones[glob->num_zones++] = zone;
249 int ttm_mem_global_init(struct ttm_mem_global *glob)
256 mtx_init(&glob->lock, "ttmgz", NULL, MTX_DEF);
257 glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
258 taskqueue_thread_enqueue, &glob->swap_queue);
259 taskqueue_start_threads(&glob->swap_queue, 1, PVM, "ttm swap");
260 TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);
262 refcount_init(&glob->kobj_ref, 1);
266 ret = ttm_mem_init_kernel_zone(glob, mem);
269 ret = ttm_mem_init_dma32_zone(glob, mem);
272 for (i = 0; i < glob->num_zones; ++i) {
273 zone = glob->zones[i];
277 ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
278 ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
281 ttm_mem_global_release(glob);
285 void ttm_mem_global_release(struct ttm_mem_global *glob)
294 taskqueue_drain(glob->swap_queue, &glob->work);
295 taskqueue_free(glob->swap_queue);
296 glob->swap_queue = NULL;
297 for (i = 0; i < glob->num_zones; ++i) {
298 zone = glob->zones[i];
302 if (refcount_release(&glob->kobj_ref))
303 ttm_mem_global_kobj_release(glob);
306 static void ttm_check_swapping(struct ttm_mem_global *glob)
312 mtx_lock(&glob->lock);
313 for (i = 0; i < glob->num_zones; ++i) {
314 zone = glob->zones[i];
321 mtx_unlock(&glob->lock);
324 taskqueue_enqueue(glob->swap_queue, &glob->work);
328 static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
335 mtx_lock(&glob->lock);
336 for (i = 0; i < glob->num_zones; ++i) {
337 zone = glob->zones[i];
342 mtx_unlock(&glob->lock);
345 void ttm_mem_global_free(struct ttm_mem_global *glob,
348 return ttm_mem_global_free_zone(glob, NULL, amount);
351 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
360 mtx_lock(&glob->lock);
361 for (i = 0; i < glob->num_zones; ++i) {
362 zone = glob->zones[i];
374 for (i = 0; i < glob->num_zones; ++i) {
375 zone = glob->zones[i];
384 mtx_unlock(&glob->lock);
385 ttm_check_swapping(glob);
391 static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
398 while (unlikely(ttm_mem_global_reserve(glob,
406 ttm_shrink(glob, false, memory + (memory >> 2) + 16);
412 int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
420 return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
426 int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
438 if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
439 zone = glob->zone_kernel;
440 return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
444 void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
448 if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
449 zone = glob->zone_kernel;
450 ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
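
Taken together, the matches above outline the public accounting API around struct ttm_mem_global: ttm_mem_global_init() sets up the lock, zones, and swap taskqueue; ttm_mem_global_alloc()/ttm_mem_global_free() charge and release bytes against the zones (invoking ttm_shrink() when a zone crosses its swap target); and ttm_mem_global_release() tears everything down. The following is a minimal usage sketch, not part of the listed file; the header path and the interruptible argument of ttm_mem_global_alloc() are assumptions, since the listing truncates that signature at line 413.

/*
 * Minimal usage sketch of the ttm_mem_global accounting object.
 * Assumptions: header path, and the interruptible argument of
 * ttm_mem_global_alloc() (only no_wait is visible in the listing).
 */
#include <dev/drm2/ttm/ttm_memory.h>	/* assumed header path */

static int
example_account(struct ttm_mem_global *glob, uint64_t size)
{
	int ret;

	ret = ttm_mem_global_init(glob);	/* lock, zones, swap taskqueue */
	if (ret != 0)
		return (ret);

	/* Charge 'size' bytes against the zones; may trigger ttm_shrink(). */
	ret = ttm_mem_global_alloc(glob, size, false /* no_wait */,
	    true /* interruptible, assumed */);
	if (ret == 0)
		ttm_mem_global_free(glob, size);	/* undo the charge */

	/* Drains the swap taskqueue and drops the kobj reference. */
	ttm_mem_global_release(glob);
	return (ret);
}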