/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>				/* kmem_alloc */
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
#include <kern/sched_prim.h>

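/*
 * Tokens are markers in a virtual aging queue that runs parallel to the
 * inactive page queue.  A token's count is the number of pages by which its
 * queue must be aged (see vm_purgeable_q_advance_all) before the token
 * becomes ripe (count == 0); one ripe token allows one purgeable object on
 * that queue to be purged.  Tokens live in a lazily grown array and are
 * chained by index; index 0 is reserved as the list terminator.
 */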
struct token {
	token_cnt_t     count;
	token_idx_t     next;
};

struct token	*tokens;
token_idx_t	token_q_max_cnt = 0;
vm_size_t	token_q_cur_size = 0;

token_idx_t     token_free_idx = 0;		/* head of free queue */
token_idx_t     token_init_idx = 1;		/* token 0 is reserved!! */
int32_t		token_new_pagecount = 0;	/* count of pages that will
						 * be added onto token queue */

int             available_for_purge = 0;	/* increase when ripe token
						 * added, decrease when ripe
						 * token removed.
						 * protected by page_queue_lock
						 */

static int token_q_allocating = 0;		/* flag for singlethreading
						 * allocator */

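/*
 * One queue per purgeable queue type (obsolete, FIFO, LIFO).  The object
 * lists hanging off these queues are protected by vm_purgeable_queue_lock;
 * the token lists are protected by the page queue lock.
 */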
struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];

decl_lck_mtx_data(,vm_purgeable_queue_lock)

#define TOKEN_ADD		0x40	/* 0x100 */
#define TOKEN_DELETE		0x41	/* 0x104 */
#define TOKEN_RIPEN		0x42	/* 0x108 */
#define OBJECT_ADD		0x48	/* 0x120 */
#define OBJECT_REMOVE		0x49	/* 0x124 */
#define OBJECT_PURGE		0x4a	/* 0x128 */
#define OBJECT_PURGE_ALL	0x4b	/* 0x12c */

static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);

#if MACH_ASSERT
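/*
 * Debug-only consistency check: walk a token queue and verify the token
 * count, the unripe and tail pointers, and (for non-obsolete queues) that
 * the pages accounted for by tokens plus the pending new-page counters
 * match the inactive queue.
 * Call with page queue locked.
 */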
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
	int             token_cnt = 0, page_cnt = 0;
	token_idx_t     token = queue->token_q_head;
	token_idx_t     unripe = 0;
	int             our_inactive_count;

	while (token) {
		if (tokens[token].count != 0) {
			assert(queue->token_q_unripe);
			if (unripe == 0) {
				assert(token == queue->token_q_unripe);
				unripe = token;
			}
			page_cnt += tokens[token].count;
		}
		if (tokens[token].next == 0)
			assert(queue->token_q_tail == token);

		token_cnt++;
		token = tokens[token].next;
	}

	if (unripe)
		assert(queue->token_q_unripe == unripe);
	assert(token_cnt == queue->debug_count_tokens);

	/* obsolete queue doesn't maintain token counts */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
		our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
		assert(our_inactive_count >= 0);
		assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
	}
}
#endif

/*
 * Add a token. Allocate token queue memory if necessary.
 * Call with page queue locked.
 */
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

	/* new token */
	token_idx_t     token;
	enum purgeable_q_type i;

find_available_token:

	if (token_free_idx) {				/* unused tokens available */
		token = token_free_idx;
		token_free_idx = tokens[token_free_idx].next;
	} else if (token_init_idx < token_q_max_cnt) {	/* lazy token array init */
		token = token_init_idx;
		token_init_idx++;
	} else {					/* allocate more memory */
		/* Wait if another thread is inside the memory alloc section */
		while (token_q_allocating) {
			wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
							  LCK_SLEEP_DEFAULT,
							  (event_t)&token_q_allocating,
							  THREAD_UNINT);
			if (res != THREAD_AWAKENED) return KERN_ABORTED;
		}

		/* Check whether another thread grew the array while we slept */
		if (token_init_idx < token_q_max_cnt)
			goto find_available_token;

		/* Still no memory. Allocate some. */
		token_q_allocating = 1;

		/* Drop page queue lock so we can allocate */
		vm_page_unlock_queues();

		struct token *new_loc;
		vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
		kern_return_t result;

		if (alloc_size / sizeof (struct token) > TOKEN_COUNT_MAX) {
			result = KERN_RESOURCE_SHORTAGE;
		} else {
			if (token_q_cur_size) {
				result = kmem_realloc(kernel_map,
						      (vm_offset_t) tokens,
						      token_q_cur_size,
						      (vm_offset_t *) &new_loc,
						      alloc_size);
			} else {
				result = kmem_alloc(kernel_map,
						    (vm_offset_t *) &new_loc,
						    alloc_size);
			}
		}

		vm_page_lock_queues();

		if (result) {
			/* Unblock waiting threads */
			token_q_allocating = 0;
			thread_wakeup((event_t)&token_q_allocating);
			return result;
		}

		/* If we get here, we allocated new memory. Update pointers and
		 * dealloc old range */
		struct token *old_tokens = tokens;
		tokens = new_loc;
		vm_size_t old_token_q_cur_size = token_q_cur_size;
		token_q_cur_size = alloc_size;
		token_q_max_cnt = (token_idx_t) (token_q_cur_size /
						 sizeof(struct token));
		assert(token_init_idx < token_q_max_cnt);	/* We must have a free token now */

		if (old_token_q_cur_size) {	/* clean up old mapping */
			vm_page_unlock_queues();
			/* kmem_realloc leaves the old region mapped. Get rid of it. */
			kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
			vm_page_lock_queues();
		}

		/* Unblock waiting threads */
		token_q_allocating = 0;
		thread_wakeup((event_t)&token_q_allocating);

		goto find_available_token;
	}

	assert(token);

	/*
	 * The new page count we accumulated needs to be applied to all queues,
	 * except the obsolete queue.
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
		assert(pages >= 0);
		assert(pages <= TOKEN_COUNT_MAX);
		purgeable_queues[i].new_pages = (int32_t) pages;
		assert(purgeable_queues[i].new_pages == pages);
	}
	token_new_pagecount = 0;

	/* set token counter value */
	if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
		tokens[token].count = queue->new_pages;
	else
		tokens[token].count = 0;	/* all obsolete items are
						 * ripe immediately */
	queue->new_pages = 0;

	/* put token on token counter list */
	tokens[token].next = 0;
	if (queue->token_q_tail == 0) {
		assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
		queue->token_q_head = token;
	} else {
		tokens[queue->token_q_tail].next = token;
	}
	if (queue->token_q_unripe == 0) {	/* only ripe tokens (token
						 * count == 0) in queue */
		if (tokens[token].count > 0)
			queue->token_q_unripe = token;	/* first unripe token */
		else
			available_for_purge++;	/* added a ripe token?
						 * increase available count */
	}
	queue->token_q_tail = token;

#if MACH_ASSERT
	queue->debug_count_tokens++;
	/* Check both queues, since we modified the new_pages count on each */
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
	vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
			      queue->type,
			      tokens[token].count,	/* num pages on token
							 * (last token) */
			      queue->debug_count_tokens,
			      0,
			      0);
#endif

	return KERN_SUCCESS;
}

/*
 * Remove first token from queue and return its index. Add its count to the
 * count of the next token.
 * Call with page queue locked.
 */
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

	token_idx_t     token;
	token = queue->token_q_head;

	assert(token);

	if (token) {
		assert(queue->token_q_tail);
		if (queue->token_q_head == queue->token_q_unripe) {
			/* no ripe tokens... must move unripe pointer */
			queue->token_q_unripe = tokens[token].next;
		} else {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		}

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		queue->token_q_head = tokens[token].next;
		if (queue->token_q_head) {
			tokens[queue->token_q_head].count += tokens[token].count;
		} else {
			/* currently no other tokens in the queue */
			/*
			 * the page count must be added to the next newly
			 * created token
			 */
			queue->new_pages += tokens[token].count;
			/* if head is zero, tail is too */
			queue->token_q_tail = 0;
		}

#if MACH_ASSERT
		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
#endif
	}
	return token;
}

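/*
 * Remove the last token from the queue and return its index. Its page count
 * is credited back to the queue's new_pages counter, so the next token that
 * is created picks it up.
 * Call with page queue locked.
 */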
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

	token_idx_t     token;
	token = queue->token_q_tail;

	assert(token);

	if (token) {
		assert(queue->token_q_head);

		if (queue->token_q_tail == queue->token_q_head)
			assert(tokens[token].next == 0);

		if (queue->token_q_unripe == 0) {
			/* we're removing a ripe token. decrease count */
			available_for_purge--;
			assert(available_for_purge >= 0);
		} else if (queue->token_q_unripe == token) {
			/* we're removing the only unripe token */
			queue->token_q_unripe = 0;
		}

		if (token == queue->token_q_head) {
			/* token is the last one in the queue */
			queue->token_q_head = 0;
			queue->token_q_tail = 0;
		} else {
			token_idx_t new_tail;

			for (new_tail = queue->token_q_head;
			     tokens[new_tail].next != token && new_tail != 0;
			     new_tail = tokens[new_tail].next) {
			}
			assert(tokens[new_tail].next == token);
			queue->token_q_tail = new_tail;
			tokens[new_tail].next = 0;
		}

		queue->new_pages += tokens[token].count;

#if MACH_ASSERT
		queue->debug_count_tokens--;
		vm_purgeable_token_check_queue(queue);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
				      queue->type,
				      tokens[queue->token_q_head].count,	/* num pages on new
										 * first token */
				      token_new_pagecount,	/* num pages waiting for
								 * next token */
				      available_for_purge,
				      0);
#endif
	}
	return token;
}

/*
 * Delete first token from queue. Return token to token queue.
 * Call with page queue locked.
 */
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
	token_idx_t     token = vm_purgeable_token_remove_first(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		token_free_idx = token;
	}
}

void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
	token_idx_t     token = vm_purgeable_token_remove_last(queue);

	if (token) {
		/* stick removed token on free queue */
		tokens[token].next = token_free_idx;
		token_free_idx = token;
	}
}


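/*
 * Age all purgeable token queues: pay one page into the first unripe token
 * of each FIFO and LIFO queue. A token whose count reaches zero becomes
 * ripe and bumps available_for_purge. The obsolete queue is never aged;
 * its tokens are always ripe.
 */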
/* Call with page queue locked. */
void
vm_purgeable_q_advance_all(void)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

	/* Check the queue counters - if they get really large, scale them back.
	 * They tend to get that large when there is no purgeable queue action. */
	int i;
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {	/* a system idling for years might get here */
		for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
			int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
			assert(pages >= 0);
			assert(pages <= TOKEN_COUNT_MAX);
			purgeable_queues[i].new_pages = (int32_t) pages;
			assert(purgeable_queues[i].new_pages == pages);
		}
		token_new_pagecount = 0;
	}

	/*
	 * Decrement token counters. A token counter can be zero; this means the
	 * object is ripe to be purged. It is not purged immediately, because that
	 * could cause several objects to be purged even if purging one would satisfy
	 * the memory needs. Instead, the pageout thread purges one after the other
	 * by calling vm_purgeable_object_purge_one and then rechecking the memory
	 * balance.
	 *
	 * No need to advance the obsolete queue - all items are ripe there,
	 * always.
	 */
	for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t queue = &purgeable_queues[i];
		uint32_t num_pages = 1;

		/* Iterate over tokens as long as there are unripe tokens. */
		while (queue->token_q_unripe) {
			if (tokens[queue->token_q_unripe].count && num_pages) {
				tokens[queue->token_q_unripe].count -= 1;
				num_pages -= 1;
			}

			if (tokens[queue->token_q_unripe].count == 0) {
				queue->token_q_unripe = tokens[queue->token_q_unripe].next;
				available_for_purge++;
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
						      queue->type,
						      tokens[queue->token_q_head].count,	/* num pages on new
											 * first token */
						      0,
						      available_for_purge,
						      0);
				continue;	/* One token ripened. Make sure to
						 * check the next. */
			}
			if (num_pages == 0)
				break;	/* Current token not ripe and no more pages.
					 * Work done. */
		}

		/*
		 * If there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead. new_pages can go negative, but it
		 * must be cancelled out by token_new_pagecount, since the
		 * inactive queue as a whole always contains a nonnegative
		 * number of pages.
		 */
		if (!queue->token_q_unripe) {
			queue->new_pages -= num_pages;
			assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
		}
#if MACH_ASSERT
		vm_purgeable_token_check_queue(queue);
#endif
	}
}

/*
 * Grab any ripe object and purge it: check the obsolete queue first, then go
 * through each volatile group. Select a queue with a ripe token.
 * Start with first group (0)
 * 1. Look at queue. Is there an object?
 *   Yes - purge it. Remove token.
 *   No - check other queue. Is there an object?
 *     No - increment group, then go to (1)
 *     Yes - purge it. Remove token. If there is no ripe token, remove ripe
 *      token from other queue and migrate unripe token from this
 *      queue to other queue.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
	assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
	/* return token to free list. advance token list. */
	token_idx_t     new_head = tokens[queue->token_q_head].next;
	tokens[queue->token_q_head].next = token_free_idx;
	token_free_idx = queue->token_q_head;
	queue->token_q_head = new_head;
	if (new_head == 0)
		queue->token_q_tail = 0;

#if MACH_ASSERT
	queue->debug_count_tokens--;
	vm_purgeable_token_check_queue(queue);
#endif

	available_for_purge--;
	assert(available_for_purge >= 0);
}

/*
 * Delete a ripe token from the given queue. If there are no ripe tokens on
 * that queue, delete a ripe token from queue2, and migrate an unripe token
 * from queue to queue2.
 * Call with page queue locked.
 */
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
	assert(queue->token_q_head);

	if (tokens[queue->token_q_head].count == 0) {
		/* This queue has a ripe token. Remove. */
		vm_purgeable_token_remove_ripe(queue);
	} else {
		assert(queue2);
		/*
		 * queue2 must have a ripe token. Remove, and migrate one
		 * from queue to queue2.
		 */
		vm_purgeable_token_remove_ripe(queue2);
		/* migrate unripe token */
		token_idx_t     token;
		token_cnt_t     count;

		/* remove token from queue1 */
		assert(queue->token_q_unripe == queue->token_q_head);	/* queue1 had no ripe
									 * tokens, remember? */
		token = vm_purgeable_token_remove_first(queue);
		assert(token);

		count = tokens[token].count;

		/* migrate to queue2 */
		/* go to migration target loc */
		token_idx_t    *token_in_queue2 = &queue2->token_q_head;
		while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
			count -= tokens[*token_in_queue2].count;
			token_in_queue2 = &tokens[*token_in_queue2].next;
		}

		if ((*token_in_queue2 == queue2->token_q_unripe) ||	/* becomes the first
									 * unripe token */
		    (queue2->token_q_unripe == 0))
			queue2->token_q_unripe = token;	/* must update unripe
							 * pointer */

		/* insert token */
		tokens[token].count = count;
		tokens[token].next = *token_in_queue2;

		/*
		 * If inserting at the end, reduce new_pages by that value;
		 * if inserting before a token, reduce that token's counter.
		 */
		if (*token_in_queue2 == 0) {	/* insertion at end of queue2 */
			queue2->token_q_tail = token;	/* must update tail
							 * pointer */
			assert(queue2->new_pages >= (int32_t) count);
			queue2->new_pages -= count;
		} else {
			assert(tokens[*token_in_queue2].count >= count);
			tokens[*token_in_queue2].count -= count;
		}
		*token_in_queue2 = token;

#if MACH_ASSERT
		queue2->debug_count_tokens++;
		vm_purgeable_token_check_queue(queue2);
#endif
	}
}

/* Find an object that can be locked. Returns locked object. */
/* Call with purgeable queue locked. */
static          vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
	lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 * Usually we would pick the first element from a queue. However, we
	 * might not be able to get a lock on it, in which case we try the
	 * remaining elements in order.
	 */

	vm_object_t     object;
	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	     !queue_end(&queue->objq[group], (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq)) {
		if (vm_object_lock_try(object)) {
			/* Locked. Great. We'll take it. Remove and return. */
			queue_remove(&queue->objq[group], object,
				     vm_object_t, objq);
			object->objq.next = 0;
			object->objq.prev = 0;
#if MACH_ASSERT
			queue->debug_count_objects--;
#endif
			return object;
		}
	}

	return 0;
}

/* Can be called without holding locks */
void
vm_purgeable_object_purge_all(void)
{
	enum purgeable_q_type i;
	int             group;
	vm_object_t     object;
	unsigned int	purged_count;
	uint32_t	collisions;

	purged_count = 0;
	collisions = 0;

restart:
	lck_mtx_lock(&vm_purgeable_queue_lock);
	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t   queue;

		queue = &purgeable_queues[i];

		/*
		 * Look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			while (!queue_empty(&queue->objq[group])) {
				object = vm_purgeable_object_find_and_lock(queue, group);
				if (object == VM_OBJECT_NULL) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					mutex_pause(collisions++);
					goto restart;
				}

				lck_mtx_unlock(&vm_purgeable_queue_lock);

				/* Take the page queue lock only around the token
				 * removal, so we don't hold it over the whole,
				 * lengthy purge operation */
				vm_page_lock_queues();
				vm_purgeable_token_remove_first(queue);
				vm_page_unlock_queues();

				assert(object->purgable == VM_PURGABLE_VOLATILE);
				(void) vm_object_purge(object);
				vm_object_unlock(object);
				purged_count++;
				goto restart;
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
			      purged_count, /* # of purged objects */
			      0,
			      available_for_purge,
			      0,
			      0);
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return;
}

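/*
 * Purge a single ripe object: try the obsolete queue first, then walk the
 * volatile groups from lowest to highest across the FIFO and LIFO queues.
 * Returns TRUE if an object was purged, FALSE if nothing purgeable could be
 * found and locked.
 * Call with page queue locked; it is dropped and retaken around the call to
 * vm_object_purge().
 */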
boolean_t
vm_purgeable_object_purge_one(void)
{
	enum purgeable_q_type i;
	int             group;
	vm_object_t     object = 0;
	purgeable_q_t   queue, queue2;

	/* Need the page queue lock since we'll be changing the token queue. */
#if MACH_ASSERT
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
	lck_mtx_lock(&vm_purgeable_queue_lock);

	/* Cycle through all queues */
	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		queue = &purgeable_queues[i];

		/*
		 * Are there any ripe tokens on this queue? If yes, we'll
		 * find an object to purge there
		 */
		if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
			continue;	/* no ripe token? Look at next purgeable
					 * queue */

		/*
		 * Now look through all groups, starting from the lowest. If
		 * we find an object in that group, try to lock it (this can
		 * fail). If locking is successful, we can drop the queue
		 * lock, remove a token and then purge the object.
		 */
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			if (!queue_empty(&queue->objq[group]) &&
			    (object = vm_purgeable_object_find_and_lock(queue, group))) {
				lck_mtx_unlock(&vm_purgeable_queue_lock);
				vm_purgeable_token_choose_and_delete_ripe(queue, 0);
				goto purge_now;
			}
			if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
				/* This is the token migration case, and it works between
				 * FIFO and LIFO only */
				queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
							   PURGEABLE_Q_TYPE_FIFO :
							   PURGEABLE_Q_TYPE_LIFO];

				if (!queue_empty(&queue2->objq[group]) &&
				    (object = vm_purgeable_object_find_and_lock(queue2, group))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
					goto purge_now;
				}
			}
			assert(queue->debug_count_objects >= 0);
		}
	}
	/*
	 * Because we have to do a try_lock on the objects, which could fail,
	 * we could end up with no object to purge at this time, even though
	 * we have objects in a purgeable state.
	 */
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return FALSE;

purge_now:

	assert(object);
	assert(object->purgable == VM_PURGABLE_VOLATILE);
	vm_page_unlock_queues();  /* Unlock for call to vm_object_purge() */
	(void) vm_object_purge(object);
	vm_object_unlock(object);
	vm_page_lock_queues();

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
			      object,	/* purged object */
			      0,
			      available_for_purge,
			      0,
			      0);

	return TRUE;
}

/* Called with object lock held */
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
		group = 0;
	if (queue->type != PURGEABLE_Q_TYPE_LIFO)	/* fifo and obsolete are
							 * fifo-queued */
		queue_enter(&queue->objq[group], object, vm_object_t, objq);	/* last to die */
	else
		queue_enter_first(&queue->objq[group], object, vm_object_t, objq);	/* first to die */

#if MACH_ASSERT
	queue->debug_count_objects++;
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
			      0,
			      tokens[queue->token_q_head].count,
			      queue->type,
			      group,
			      0);
#endif

	lck_mtx_unlock(&vm_purgeable_queue_lock);
}

/* Look for object. If found, remove from purgeable queue. */
/* Called with object lock held */
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
	enum purgeable_q_type i;
	int             group;

	vm_object_lock_assert_exclusive(object);
	lck_mtx_lock(&vm_purgeable_queue_lock);

	for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
		purgeable_q_t   queue = &purgeable_queues[i];
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			vm_object_t     o;
			for (o = (vm_object_t) queue_first(&queue->objq[group]);
			     !queue_end(&queue->objq[group], (queue_entry_t) o);
			     o = (vm_object_t) queue_next(&o->objq)) {
				if (o == object) {
					queue_remove(&queue->objq[group], object,
						     vm_object_t, objq);
#if MACH_ASSERT
					queue->debug_count_objects--;
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
							      0,
							      tokens[queue->token_q_head].count,
							      queue->type,
							      group,
							      0);
#endif
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					object->objq.next = 0;
					object->objq.prev = 0;
					return &purgeable_queues[i];
				}
			}
		}
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	return 0;
}