kmem.c revision 6712:79afecec3f3c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Kernel memory allocator, as described in the following two papers and a
 * statement about the consolidator:
 *
 * Jeff Bonwick,
 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 * Proceedings of the Summer 1994 Usenix Conference.
 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 * Jeff Bonwick and Jonathan Adams,
 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 * Arbitrary Resources.
 * Proceedings of the 2001 Usenix Conference.
 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * kmem Slab Consolidator Big Theory Statement:
 *
 * 1. Motivation
 *
 * As stated in Bonwick94, slabs provide the following advantages over other
 * allocation structures in terms of memory fragmentation:
 *
 *  - Internal fragmentation (per-buffer wasted space) is minimal.
 *  - Severe external fragmentation (unused buffers on the free list) is
 *    unlikely.
 *
 * Segregating objects by size eliminates one source of external fragmentation,
 * and according to Bonwick:
 *
 *   The other reason that slabs reduce external fragmentation is that all
 *   objects in a slab are of the same type, so they have the same lifetime
 *   distribution. The resulting segregation of short-lived and long-lived
 *   objects at slab granularity reduces the likelihood of an entire page being
 *   held hostage due to a single long-lived allocation [Barrett93, Hanson90].
 *
 * While unlikely, severe external fragmentation remains possible. Clients that
 * allocate both short- and long-lived objects from the same cache cannot
 * anticipate the distribution of long-lived objects within the allocator's slab
 * implementation. Even a small percentage of long-lived objects distributed
 * randomly across many slabs can lead to a worst case scenario where the client
 * frees the majority of its objects and the system gets back almost none of the
 * slabs. Despite the client doing what it reasonably can to help the system
 * reclaim memory, the allocator cannot shake free enough slabs because of
 * lonely allocations stubbornly hanging on. Although the allocator is in a
 * position to diagnose the fragmentation, there is nothing that the allocator
 * by itself can do about it. It only takes a single allocated object to prevent
 * an entire slab from being reclaimed, and any object handed out by
 * kmem_cache_alloc() is by definition in the client's control. Conversely,
 * although the client is in a position to move a long-lived object, it has no
 * way of knowing if the object is causing fragmentation, and if so, where to
 * move it. A solution necessarily requires further cooperation between the
 * allocator and the client.
 *
 * 2. Move Callback
 *
 * The kmem slab consolidator therefore adds a move callback to the
 * allocator/client interface, improving worst-case external fragmentation in
 * kmem caches that supply a function to move objects from one memory location
 * to another. In a situation of low memory, kmem attempts to consolidate all of
 * a cache's slabs at once; otherwise it works slowly to bring external
 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
 * thereby helping to avoid a low memory situation in the future.
 *
 * The callback has the following signature:
 *
 *   kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
 *
 * It supplies the kmem client with two addresses: the allocated object that
 * kmem wants to move and a buffer selected by kmem for the client to use as the
 * copy destination. The callback is kmem's way of saying "Please get off of
 * this buffer and use this one instead." kmem knows where it wants to move the
 * object in order to best reduce fragmentation. All the client needs to know
 * about the second argument (void *new) is that it is an allocated, constructed
 * object ready to take the contents of the old object. When the move function
 * is called, the system is likely to be low on memory, and the new object
 * spares the client from having to worry about allocating memory for the
 * requested move. The third argument supplies the size of the object, in case a
 * single move function handles multiple caches whose objects differ only in
 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
 * user argument passed to the constructor, destructor, and reclaim functions is
 * also passed to the move callback.
 *
 * 2.1 Setting the Move Callback
 *
 * The client sets the move callback after creating the cache and before
 * allocating from it:
 *
 *      object_cache = kmem_cache_create(...);
 *      kmem_cache_set_move(object_cache, object_move);
 *
 * 2.2 Move Callback Return Values
 *
 * Only the client knows its own data and when it is a good time to move it.
 * The client is cooperating with kmem to return unused memory to the system,
 * and kmem respectfully accepts this help at the client's convenience. When
 * asked to move an object, the client can respond with any of the following:
 *
 *   typedef enum kmem_cbrc {
 *           KMEM_CBRC_YES,
 *           KMEM_CBRC_NO,
 *           KMEM_CBRC_LATER,
 *           KMEM_CBRC_DONT_NEED,
 *           KMEM_CBRC_DONT_KNOW
 *   } kmem_cbrc_t;
 *
 * The client must not explicitly kmem_cache_free() either of the objects passed
 * to the callback, since kmem wants to free them directly to the slab layer
 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
 * objects to free:
 *
 *       YES: (Did it) The client moved the object, so kmem frees the old one.
 *        NO: (Never) The client refused, so kmem frees the new object (the
 *            unused copy destination). kmem also marks the slab of the old
 *            object so as not to bother the client with further callbacks for
 *            that object as long as the slab remains on the partial slab list.
 *            (The system won't be getting the slab back as long as the
 *            immovable object holds it hostage, so there's no point in moving
 *            any of its objects.)
 *     LATER: The client is using the object and cannot move it now, so kmem
 *            frees the new object (the unused copy destination). kmem still
 *            attempts to move other objects off the slab, since it expects to
 *            succeed in clearing the slab in a later callback. The client
 *            should use LATER instead of NO if the object is likely to become
 *            movable very soon.
 * DONT_NEED: The client no longer needs the object, so kmem frees the old along
 *            with the new object (the unused copy destination). This response
 *            is the client's opportunity to be a model citizen and give back as
 *            much as it can.
 * DONT_KNOW: The client does not know about the object because
 *            a) the client has just allocated the object and not yet put it
 *               wherever it expects to find known objects,
 *            b) the client has removed the object from wherever it expects to
 *               find known objects and is about to free it, or
 *            c) the client has freed the object.
 *            In all these cases (a, b, and c) kmem frees the new object (the
 *            unused copy destination) and searches for the old object in the
 *            magazine layer. If found, the object is removed from the magazine
 *            layer and freed to the slab layer so it will no longer hold the
 *            slab hostage.
 *
 * 2.3 Object States
 *
 * Neither kmem nor the client can be assumed to know the object's whereabouts
 * at the time of the callback. An object belonging to a kmem cache may be in
 * any of the following states:
 *
 * 1. Uninitialized on the slab
 * 2. Allocated from the slab but not constructed (still uninitialized)
 * 3. Allocated from the slab, constructed, but not yet ready for business
 *    (not in a valid state for the move callback)
 * 4. In use (valid and known to the client)
 * 5. About to be freed (no longer in a valid state for the move callback)
 * 6. Freed to a magazine (still constructed)
 * 7. Allocated from a magazine, not yet ready for business (not in a valid
 *    state for the move callback), and about to return to state #4
 * 8. Deconstructed on a magazine that is about to be freed
 * 9. Freed to the slab
 *
 * Since the move callback may be called at any time while the object is in any
 * of the above states (except state #1), the client needs a safe way to
 * determine whether or not it knows about the object. Specifically, the client
 * needs to know whether or not the object is in state #4, the only state in
 * which a move is valid. If the object is in any other state, the client should
 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
 * the object's fields.
 *
 * Note that although an object may be in state #4 when kmem initiates the move
 * request, the object may no longer be in that state by the time kmem actually
 * calls the move function. Not only does the client free objects
 * asynchronously, kmem itself puts move requests on a queue where they are
 * pending until kmem processes them from another context. Also, objects freed
 * to a magazine appear allocated from the point of view of the slab layer, so
 * kmem may even initiate requests for objects in a state other than state #4.
 *
 * 2.3.1 Magazine Layer
 *
 * An important insight revealed by the states listed above is that the magazine
 * layer is populated only by kmem_cache_free(). Magazines of constructed
 * objects are never populated directly from the slab layer (which contains raw,
 * unconstructed objects). Whenever an allocation request cannot be satisfied
 * from the magazine layer, the magazines are bypassed and the request is
 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
 * the object constructor only when allocating from the slab layer, and only in
 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
 * the move callback. kmem does not preconstruct objects in anticipation of
 * kmem_cache_alloc().
 *
 * 2.3.2 Object Constructor and Destructor
 *
 * If the client supplies a destructor, it must be valid to call the destructor
 * on a newly created object (immediately after the constructor).
 *
 * 2.4 Recognizing Known Objects
 *
 * There is a simple test to determine safely whether or not the client knows
 * about a given object in the move callback. It relies on the fact that kmem
 * guarantees that the object of the move callback has only been touched by the
 * client itself or else by kmem. kmem does this by ensuring that none of the
 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
 * callback is pending. When the last object on a slab is freed, if there is a
 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
 * slabs on that list until all pending callbacks are completed. That way,
 * clients can be certain that the object of a move callback is in one of the
 * states listed above, making it possible to distinguish known objects (in
 * state #4) using the two low order bits of any pointer member (with the
 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
 * platforms).
 *
 * The test works as long as the client always transitions objects from state #4
 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
 * order bit of the client-designated pointer member. Since kmem only writes
 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
 * guaranteed to set at least one of the two low order bits. Therefore, given an
 * object with a back pointer to a 'container_t *o_container', the client can
 * test
 *
 *      container_t *container = object->o_container;
 *      if ((uintptr_t)container & 0x3) {
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *
 * Typically, an object will have a pointer to some structure with a list or
 * hash where objects from the cache are kept while in use. Assuming that the
 * client has some way of knowing that the container structure is valid and will
 * not go away during the move, and assuming that the structure includes a lock
 * to protect whatever collection is used, then the client would continue as
 * follows:
 *
 *      // Ensure that the container structure does not go away.
 *      if (container_hold(container) == 0) {
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *      mutex_enter(&container->c_objects_lock);
 *      if (container != object->o_container) {
 *              mutex_exit(&container->c_objects_lock);
 *              container_rele(container);
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *
 * At this point the client knows that the object cannot be freed as long as
 * c_objects_lock is held. Note that after acquiring the lock, the client must
 * recheck the o_container pointer in case the object was removed just before
 * acquiring the lock.
 *
 * When the client is about to free an object, it must first remove that object
 * from the list, hash, or other structure where it is kept. At that time, to
 * mark the object so it can be distinguished from the remaining, known objects,
 * the client sets the designated low order bit:
 *
 *      mutex_enter(&container->c_objects_lock);
 *      object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
 *      list_remove(&container->c_objects, object);
 *      mutex_exit(&container->c_objects_lock);
 *
 * In the common case, the object is freed to the magazine layer, where it may
 * be reused on a subsequent allocation without the overhead of calling the
 * constructor. While in the magazine it appears allocated from the point of
 * view of the slab layer, making it a candidate for the move callback. Most
 * objects unrecognized by the client in the move callback fall into this
 * category and are cheaply distinguished from known objects by the test
 * described earlier. Since recognition is cheap for the client, and searching
 * magazines is expensive for kmem, kmem defers searching until the client first
 * returns KMEM_CBRC_DONT_KNOW. As long as the needed effort is reasonable, kmem
 * elsewhere does what it can to avoid bothering the client unnecessarily.
 *
 * Invalidating the designated pointer member before freeing the object marks
 * the object to be avoided in the callback, and conversely, assigning a valid
 * value to the designated pointer member after allocating the object makes the
 * object fair game for the callback:
 *
 *      ... allocate object ...
 *      ... set any initial state not set by the constructor ...
 *
 *      mutex_enter(&container->c_objects_lock);
 *      list_insert_tail(&container->c_objects, object);
 *      membar_producer();
 *      object->o_container = container;
 *      mutex_exit(&container->c_objects_lock);
 *
 * Note that everything else must be valid before o_container is set, since
 * that assignment is what makes the object fair game for the move callback.
 * The membar_producer() call ensures
 * that all the object's state is written to memory before setting the pointer
 * that transitions the object from state #3 or #7 (allocated, constructed, not
 * yet in use) to state #4 (in use, valid). That's important because the move
 * function has to check the validity of the pointer before it can safely
 * acquire the lock protecting the collection where it expects to find known
 * objects.
 *
 * This method of distinguishing known objects observes the usual symmetry:
 * invalidating the designated pointer is the first thing the client does before
 * freeing the object, and setting the designated pointer is the last thing the
 * client does after allocating the object. Of course, the client is not
 * required to use this method. Fundamentally, how the client recognizes known
 * objects is completely up to the client, but this method is recommended as an
 * efficient and safe way to take advantage of the guarantees made by kmem. If
 * the entire object is arbitrary data without any markable bits from a suitable
 * pointer member, then the client must find some other method, such as
 * searching a hash table of known objects.
 *
 * 2.5 Preventing Objects From Moving
 *
 * Besides a way to distinguish known objects, the other thing that the client
 * needs is a strategy to ensure that an object will not move while the client
 * is actively using it. The details of satisfying this requirement tend to be
 * highly cache-specific. It might seem that the same rules that let a client
 * remove an object safely should also decide when an object can be moved
 * safely. However, any object state that makes a removal attempt invalid is
 * likely to be long-lasting for objects that the client does not expect to
 * remove. kmem knows nothing about the object state and is equally likely (from
 * the client's point of view) to request a move for any object in the cache,
 * whether prepared for removal or not. Even a low percentage of objects stuck
 * in place by unremovability will defeat the consolidator if the stuck objects
 * are the same long-lived allocations likely to hold slabs hostage.
 * Fundamentally, the consolidator is not aimed at common cases. Severe external
 * fragmentation is a worst case scenario manifested as sparsely allocated
 * slabs, by definition a low percentage of the cache's objects. When deciding
 * what makes an object movable, keep in mind the goal of the consolidator: to
 * bring worst-case external fragmentation within the limits guaranteed for
 * internal fragmentation. Removability is a poor criterion if it is likely to
 * exclude more than an insignificant percentage of objects for long periods of
 * time.
 *
 * A tricky general solution exists, and it has the advantage of letting you
 * move any object at almost any moment, practically eliminating the likelihood
 * that an object can hold a slab hostage. However, if there is a cache-specific
 * way to ensure that an object is not actively in use in the vast majority of
 * cases, a simpler solution that leverages this cache-specific knowledge is
 * preferred.
 *
 * 2.5.1 Cache-Specific Solution
 *
 * As an example of a cache-specific solution, the ZFS znode cache takes
 * advantage of the fact that the vast majority of znodes are only being
 * referenced from the DNLC. (A typical case might be a few hundred in active
 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
 * client has established that it recognizes the znode and can access its fields
 * safely (using the method described earlier), it then tests whether the znode
 * is referenced by anything other than the DNLC. If so, it assumes that the
 * znode may be in active use and is unsafe to move, so it drops its locks and
 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
 * else znodes are used, no change is needed to protect against the possibility
 * of the znode moving. The disadvantage is that it remains possible for an
 * application to hold a znode slab hostage with an open file descriptor.
 * However, this case ought to be rare and the consolidator has a way to deal
 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
 * object, kmem eventually stops believing it and treats the slab as if the
 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
 * then focus on getting it off of the partial slab list by allocating rather
 * than freeing all of its objects. (Either way of getting a slab off the
 * free list reduces fragmentation.)
 *
 * 2.5.2 General Solution
 *
 * The general solution, on the other hand, requires an explicit hold everywhere
 * the object is used to prevent it from moving. To keep the client locking
 * strategy as uncomplicated as possible, kmem guarantees the simplifying
 * assumption that move callbacks are sequential, even across multiple caches.
 * Internally, a global queue processed by a single thread supports all caches
 * implementing the callback function. No matter how many caches supply a move
 * function, the consolidator never moves more than one object at a time, so the
 * client does not have to worry about tricky lock ordering involving several
 * related objects from different kmem caches.
 *
 * The general solution implements the explicit hold as a read-write lock, which
 * allows multiple readers to access an object from the cache simultaneously
 * while a single writer is excluded from moving it. A single rwlock for the
 * entire cache would lock out all threads from using any of the cache's objects
 * even though only a single object is being moved, so to reduce contention,
 * the client can fan out the single rwlock into an array of rwlocks hashed by
 * the object address, making it probable that moving one object will not
 * prevent other threads from using a different object. The rwlock cannot be a
 * member of the object itself, because the possibility of the object moving
 * makes it unsafe to access any of the object's fields until the lock is
 * acquired.
 *
 * Assuming a small, fixed number of locks, it's possible that multiple objects
 * will hash to the same lock. A thread that needs to use multiple objects in
 * the same function may acquire the same lock multiple times. Since rwlocks are
 * reentrant for readers, and since there is never more than a single writer at
 * a time (assuming that the client acquires the lock as a writer only when
 * moving an object inside the callback), there would seem to be no problem.
 * However, a client locking multiple objects in the same function must handle
 * one case of potential deadlock: Assume that thread A needs to prevent both
 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
 * same lock, that thread A will acquire the lock for object 1 as a reader
 * before thread B sets the lock's write-wanted bit, preventing thread A from
 * reacquiring the lock for object 2 as a reader. Unable to make forward
 * progress, thread A will never release the lock for object 1, resulting in
 * deadlock.
 *
 * There are two ways of avoiding the deadlock just described. The first is to
 * use rw_tryenter() rather than rw_enter() in the callback function when
 * attempting to acquire the lock as a writer. If tryenter discovers that the
 * same object (or another object hashed to the same lock) is already in use, it
 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
 * since it allows a thread to acquire the lock as a reader in spite of a
 * waiting writer. This second approach insists on moving the object now, no
 * matter how many readers the move function must wait for in order to do so,
 * and could delay the completion of the callback indefinitely (blocking
 * callbacks to other clients). In practice, a less insistent callback using
 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
 * little reason to use anything else.
 *
 * Avoiding deadlock is not the only problem that an implementation using an
 * explicit hold needs to solve. Locking the object in the first place (to
 * prevent it from moving) remains a problem, since the object could move
 * between the time you obtain a pointer to the object and the time you acquire
 * the rwlock hashed to that pointer value. Therefore the client needs to
 * recheck the value of the pointer after acquiring the lock, drop the lock if
 * the value has changed, and try again. This requires a level of indirection:
 * something that points to the object rather than the object itself, that the
 * client can access safely while attempting to acquire the lock. (The object
 * itself cannot be referenced safely because it can move at any time.)
 * The following lock-acquisition function takes whatever is safe to reference
 * (arg), follows its pointer to the object (using function f), and tries as
 * often as necessary to acquire the hashed lock and verify that the object
 * still has not moved:
 *
 *      object_t *
 *      object_hold(object_f f, void *arg)
 *      {
 *              object_t *op;
 *
 *              op = f(arg);
 *              if (op == NULL) {
 *                      return (NULL);
 *              }
 *
 *              rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *              while (op != f(arg)) {
 *                      rw_exit(OBJECT_RWLOCK(op));
 *                      op = f(arg);
 *                      if (op == NULL) {
 *                              break;
 *                      }
 *                      rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *              }
 *
 *              return (op);
 *      }
 *
 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
 * lock reacquisition loop, while necessary, almost never executes. The function
 * pointer f (used to obtain the object pointer from arg) has the following type
 * definition:
 *
 *      typedef object_t *(*object_f)(void *arg);
 *
 * An object_f implementation is likely to be as simple as accessing a structure
 * member:
 *
 *      object_t *
 *      s_object(void *arg)
 *      {
 *              something_t *sp = arg;
 *              return (sp->s_object);
 *      }
 *
 * The flexibility of a function pointer allows the path to the object to be
 * arbitrarily complex and also supports the notion that depending on where you
 * are using the object, you may need to get it from someplace different.
 *
 * The function that releases the explicit hold is simpler because it does not
 * have to worry about the object moving:
 *
 *      void
 *      object_rele(object_t *op)
 *      {
 *              rw_exit(OBJECT_RWLOCK(op));
 *      }
 *
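 * As a minimal sketch of the hashed-lock idea (the table size, shift, and
 * names here are illustrative assumptions, not part of the kmem interface),
 * OBJECT_RWLOCK might hash the object address into a fixed array of rwlocks:
 *
 *      #define	OBJECT_LOCK_COUNT	64	// a power of two
 *      static krwlock_t object_rwlock[OBJECT_LOCK_COUNT];
 *
 *      // Drop the low (alignment) bits and mask into the lock table.
 *      #define	OBJECT_RWLOCK(op)				\
 *              (&object_rwlock[((uintptr_t)(op) >> 3) &	\
 *              (OBJECT_LOCK_COUNT - 1)])
 *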
 * The caller is spared these details so that obtaining and releasing an
 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
 * of object_hold() only needs to know that the returned object pointer is valid
 * if not NULL and that the object will not move until released.
 *
 * Although object_hold() prevents an object from moving, it does not prevent it
 * from being freed. The caller must take measures before calling object_hold()
 * (afterwards is too late) to ensure that the held object cannot be freed. The
 * caller must do so without accessing the unsafe object reference, so any lock
 * or reference count used to ensure the continued existence of the object must
 * live outside the object itself.
 *
 * Obtaining a new object is a special case where an explicit hold is impossible
 * for the caller. Any function that returns a newly allocated object (either as
 * a return value, or as an in-out parameter) must return it already held; once
 * the caller has it, it is too late, since the object cannot be safely accessed
 * without the level of indirection described earlier. The following
 * object_alloc() example uses the same code shown earlier to transition a new
 * object into the state of being recognized (by the client) as a known object.
 * The function must acquire the hold (rw_enter) before that state transition
 * makes the object movable:
 *
 *      static object_t *
 *      object_alloc(container_t *container)
 *      {
 *              object_t *object = kmem_cache_alloc(object_cache, KM_SLEEP);
 *              ... set any initial state not set by the constructor ...
 *              rw_enter(OBJECT_RWLOCK(object), RW_READER);
 *              mutex_enter(&container->c_objects_lock);
 *              list_insert_tail(&container->c_objects, object);
 *              membar_producer();
 *              object->o_container = container;
 *              mutex_exit(&container->c_objects_lock);
 *              return (object);
 *      }
 *
 * Functions that implicitly acquire an object hold (any function that calls
 * object_alloc() to supply an object for the caller) need to be carefully noted
 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
 * prevent all objects hashed to the affected rwlocks from ever being moved.
 *
 * The pointer to a held object can be hashed to the holding rwlock even after
 * the object has been freed. Although it is possible to release the hold
 * after freeing the object, you may decide to release the hold implicitly in
 * whatever function frees the object, so as to release the hold as soon as
 * possible, and for the sake of symmetry with the function that implicitly
 * acquires the hold when it allocates the object. Here, object_free() releases
 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
 * matching pair with object_hold():
 *
 *      void
 *      object_free(object_t *object)
 *      {
 *              container_t *container;
 *
 *              ASSERT(object_held(object));
 *              container = object->o_container;
 *              mutex_enter(&container->c_objects_lock);
 *              object->o_container =
 *                  (void *)((uintptr_t)object->o_container | 0x1);
 *              list_remove(&container->c_objects, object);
 *              mutex_exit(&container->c_objects_lock);
 *              object_rele(object);
 *              kmem_cache_free(object_cache, object);
 *      }
 *
 * Note that object_free() cannot safely accept an object pointer as an argument
 * unless the object is already held. Any function that calls object_free()
 * needs to be carefully noted since it similarly forms a matching pair with
 * object_hold().
 *
 * To complete the picture, the following callback function implements the
 * general solution by moving objects only if they are currently unheld:
 *
 *      static kmem_cbrc_t
 *      object_move(void *buf, void *newbuf, size_t size, void *arg)
 *      {
 *              object_t *op = buf, *np = newbuf;
 *              container_t *container;
 *
 *              container = op->o_container;
 *              if ((uintptr_t)container & 0x3) {
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              // Ensure that the container structure does not go away.
 *              if (container_hold(container) == 0) {
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              mutex_enter(&container->c_objects_lock);
 *              if (container != op->o_container) {
 *                      mutex_exit(&container->c_objects_lock);
 *                      container_rele(container);
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
 *                      mutex_exit(&container->c_objects_lock);
 *                      container_rele(container);
 *                      return (KMEM_CBRC_LATER);
 *              }
 *
 *              object_move_impl(op, np); // critical section
 *              rw_exit(OBJECT_RWLOCK(op));
 *
 *              op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
 *              list_link_replace(&op->o_link_node, &np->o_link_node);
 *              mutex_exit(&container->c_objects_lock);
 *              container_rele(container);
 *              return (KMEM_CBRC_YES);
 *      }
 *
 * Note that object_move() must invalidate the designated o_container pointer of
 * the old object in the same way that object_free() does, since kmem will free
 * the object in response to the KMEM_CBRC_YES return value.
 *
 * The lock order in object_move() differs from object_alloc(), which locks
 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
 * not a problem. Holding the lock on the object list in the example above
 * through the entire callback not only prevents the object from going away, it
 * also allows you to lock the list elsewhere and know that none of its elements
 * will move during iteration.
 *
 * Adding an explicit hold everywhere an object from the cache is used is tricky
 * and involves much more change to client code than a cache-specific solution
 * that leverages existing state to decide whether or not an object is
 * movable. However, this approach has the advantage that no object remains
 * immovable for any significant length of time, making it extremely unlikely
 * that long-lived allocations can continue holding slabs hostage; and it works
 * for any cache.
 *
 * 3. Consolidator Implementation
 *
 * Once the client supplies a move function that a) recognizes known objects and
 * b) avoids moving objects that are actively in use, the remaining work is up
 * to the consolidator to decide which objects to move and when to issue
 * callbacks.
 *
 * The consolidator relies on the fact that a cache's slabs are ordered by
 * usage. Each slab has a fixed number of objects. Depending on the slab's
 * "color" (the offset of the first object from the beginning of the slab;
 * offsets are staggered to mitigate false sharing of cache lines) it is either
 * the maximum number of objects per slab determined at cache creation time or
 * else the number closest to the maximum that fits within the space remaining
 * after the initial offset. A completely allocated slab may contribute some
 * internal fragmentation (per-slab overhead) but no external fragmentation, so
 * it is of no interest to the consolidator. At the other extreme, slabs whose
 * objects have all been freed to the slab are released to the virtual memory
 * (VM) subsystem (objects freed to magazines are still allocated as far as the
 * slab is concerned). External fragmentation exists when there are slabs
 * somewhere between these extremes. A partial slab has at least one but not all
 * of its objects allocated. The more partial slabs, and the fewer allocated
 * objects on each of them, the higher the fragmentation. Hence the
 * consolidator's overall strategy is to reduce the number of partial slabs by
 * moving allocated objects from the least allocated slabs to the most allocated
 * slabs.
 *
 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
 * slabs are kept separately in an unordered list. Since the majority of slabs
 * tend to be completely allocated (a typical unfragmented cache may have
 * thousands of complete slabs and only a single partial slab), separating
 * complete slabs improves the efficiency of partial slab ordering, since the
 * complete slabs do not affect the depth or balance of the AVL tree. This
 * ordered sequence of partial slabs acts as a "free list" supplying objects for
 * allocation requests.
 *
 * Objects are always allocated from the first partial slab in the free list,
 * where the allocation is most likely to eliminate a partial slab (by
 * completely allocating it). Conversely, when a single object from a completely
 * allocated slab is freed to the slab, that slab is added to the front of the
 * free list. Since most free list activity involves highly allocated slabs
 * coming and going at the front of the list, slabs tend naturally toward the
 * ideal order: highly allocated at the front, sparsely allocated at the back.
 * Slabs with few allocated objects are likely to become completely free if they
 * keep a safe distance away from the front of the free list. Slab misorders
 * interfere with the natural tendency of slabs to become completely free or
 * completely allocated. For example, a slab with a single allocated object
 * needs only a single free to escape the cache; its natural desire is
 * frustrated when it finds itself at the front of the list where a second
 * allocation happens just before the free could have released it. Another slab
 * with all but one object allocated might have supplied the buffer instead, so
 * that both (as opposed to neither) of the slabs would have been taken off the
 * free list.
 *
 * Although slabs tend naturally toward the ideal order, misorders allowed by a
 * simple list implementation defeat the consolidator's strategy of merging
 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
 * needs another way to fix misorders to optimize its callback strategy. One
 * approach is to periodically scan a limited number of slabs, advancing a
 * marker to hold the current scan position, and to move extreme misorders to
 * the front or back of the free list and to the front or back of the current
 * scan range. By making consecutive scan ranges overlap by one slab, the least
 * allocated slab in the current range can be carried along from the end of one
 * scan to the start of the next.
 *
 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
 * task, however. Since most of the cache's activity is in the magazine layer,
 * and allocations from the slab layer represent only a startup cost, the
 * overhead of maintaining a balanced tree is not a significant concern compared
 * to the opportunity of reducing complexity by eliminating the partial slab
 * scanner just described. The overhead of an AVL tree is minimized by
 * maintaining only partial slabs in the tree and keeping completely allocated
 * slabs separately in a list. To avoid increasing the size of the slab
 * structure, the AVL linkage pointers are reused for the slab's list linkage,
 * since the slab will always be either partial or complete, never stored both
 * ways at the same time. To further minimize the overhead of the AVL tree, the
 * compare function that orders partial slabs by usage divides the range of
 * allocated object counts into bins such that counts within the same bin are
 * considered equal. Binning partial slabs makes it less likely that allocating
 * or freeing a single object will change the slab's order, requiring a tree
 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
 * requiring some rebalancing of the tree). Allocation counts closest to
 * completely free and completely allocated are left unbinned (finely sorted) to
 * better support the consolidator's strategy of merging slabs at either
 * extreme.
 *
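 * To illustrate the binning idea, a compare function along these lines would
 * do (a sketch only; the type and field names are illustrative assumptions,
 * and the real bin boundaries are chosen as described above):
 *
 *      static int
 *      partial_slab_cmp(const void *l, const void *r)
 *      {
 *              const myslab_t *s1 = l;
 *              const myslab_t *s2 = r;
 *              size_t w1 = s1->slab_inuse >> 4;   // bin by allocated count
 *              size_t w2 = s2->slab_inuse >> 4;
 *
 *              // higher usage sorts toward the front of the free list
 *              if (w1 > w2)
 *                      return (-1);
 *              if (w1 < w2)
 *                      return (1);
 *              // tie-break on address: AVL nodes must compare unequal
 *              if ((uintptr_t)s1 < (uintptr_t)s2)
 *                      return (-1);
 *              return ((uintptr_t)s1 > (uintptr_t)s2);
 *      }
 *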
 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
 *
 * The consolidator piggybacks on the kmem maintenance thread and is called on
 * the same interval as kmem_cache_update(), once per cache every fifteen
 * seconds. kmem maintains a running count of unallocated objects in the slab
 * layer (cache_bufslab). The consolidator checks whether that number exceeds
 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
 * there is a significant number of slabs in the cache (arbitrarily a minimum
 * 101 total slabs). Unused objects that have fallen out of the magazine layer's
 * working set are included in the assessment, and magazines in the depot are
 * reaped if those objects would lift cache_bufslab above the fragmentation
 * threshold. Once the consolidator decides that a cache is fragmented, it looks
 * for a candidate slab to reclaim, starting at the end of the partial slab free
 * list and scanning backwards. At first the consolidator is choosy: only a slab
 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
 * single allocated object, regardless of percentage). If there is difficulty
 * finding a candidate slab, kmem raises the allocation threshold incrementally,
 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
 * external fragmentation (unused objects on the free list) below 12.5% (1/8),
 * even in the worst case of every slab in the cache being almost 7/8 allocated.
 * The threshold can also be lowered incrementally when candidate slabs are easy
 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
 * is no longer fragmented.
 *
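 * In code, the initial fragmentation test amounts to something like the
 * following sketch (cache_bufslab and cache_buftotal are named above, and the
 * 1/8 ratio corresponds to the kmem_frag_numer/kmem_frag_denom tunables
 * defined below; the slab-count accessor is an illustrative assumption):
 *
 *      if (cp->cache_bufslab > cp->cache_buftotal / 8 &&
 *          total_slab_count(cp) >= kmem_frag_minslabs) {
 *              // fragmented: scan backwards from the end of the
 *              // partial slab list for a candidate slab to reclaim
 *      }
 *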
 * 3.2 Generating Callbacks
 *
 * Once an eligible slab is chosen, a callback is generated for every allocated
 * object on the slab, in the hope that the client will move everything off the
 * slab and make it reclaimable. Objects selected as move destinations are
 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
 * order (most allocated at the front, least allocated at the back) and a
 * cooperative client, the consolidator will succeed in removing slabs from both
 * ends of the free list, completely allocating on the one hand and completely
 * freeing on the other. Objects selected as move destinations are allocated in
 * the kmem maintenance thread where move requests are enqueued. A separate
 * callback thread removes pending callbacks from the queue and calls the
 * client. The separate thread ensures that client code (the move function) does
 * not interfere with internal kmem maintenance tasks. A map of pending
 * callbacks keyed by object address (the object to be moved) is checked to
 * ensure that duplicate callbacks are not generated for the same object.
 * Allocating the move destination (the object to move to) prevents subsequent
 * callbacks from selecting the same destination as an earlier pending callback.
 *
 * Move requests can also be generated by kmem_cache_reap() when the system is
 * desperate for memory and by kmem_cache_move_notify(), called by the client to
 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
 * The map of pending callbacks is protected by the same lock that protects the
 * slab layer.
 *
 * When the system is desperate for memory, kmem does not bother to determine
 * whether or not the cache exceeds the fragmentation threshold, but tries to
 * consolidate as many slabs as possible. Normally, the consolidator chews
 * slowly, one sparsely allocated slab at a time during each maintenance
 * interval that the cache is fragmented. When desperate, the consolidator
 * starts at the last partial slab and enqueues callbacks for every allocated
 * object on every partial slab, working backwards until it reaches the first
 * partial slab. The first partial slab, meanwhile, advances in pace with the
 * consolidator as allocations to supply move destinations for the enqueued
 * callbacks use up the highly allocated slabs at the front of the free list.
 * Ideally, the overgrown free list collapses like an accordion, starting at
 * both ends and ending at the center with a single partial slab.
 *
 * 3.3 Client Responses
 *
 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
 * marks the slab that supplied the stuck object non-reclaimable and moves it
 * to the front of the free list. The slab remains marked as long as it remains
 * on the
 * free list, and it appears more allocated to the partial slab compare function
 * than any unmarked slab, no matter how many of its objects are allocated.
 * Since even one immovable object ties up the entire slab, the goal is to
 * completely allocate any slab that cannot be completely freed. kmem does not
 * bother generating callbacks to move objects from a marked slab unless the
 * system is desperate.
 *
 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
 * slab. If the client responds LATER too many times, kmem disbelieves and
 * treats the response as a NO. The count is cleared when the slab is taken off
 * the partial slab list or when the client moves one of the slab's objects.
 *
 * 4. Observability
 *
 * A kmem cache's external fragmentation is best observed with 'mdb -k' using
 * the ::kmem_slabs dcmd. For a complete description of the command, enter
 * '::help kmem_slabs' at the mdb prompt.
 */

#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/mutex.h>
#include <sys/bitmap.h>
#include <sys/atomic.h>
#include <sys/kobj.h>
#include <sys/disp.h>
#include <vm/seg_kmem.h>
#include <sys/log.h>
#include <sys/callb.h>
#include <sys/taskq.h>
#include <sys/modctl.h>
#include <sys/reboot.h>
#include <sys/id32.h>
#include <sys/zone.h>
#include <sys/netstack.h>
#ifdef	DEBUG
#include <sys/random.h>
#endif

extern void streams_msg_init(void);
extern int segkp_fromheap;
extern void segkp_cache_free(void);

struct kmem_cache_kstat {
	kstat_named_t	kmc_buf_size;
	kstat_named_t	kmc_align;
	kstat_named_t	kmc_chunk_size;
	kstat_named_t	kmc_slab_size;
	kstat_named_t	kmc_alloc;
	kstat_named_t	kmc_alloc_fail;
	kstat_named_t	kmc_free;
	kstat_named_t	kmc_depot_alloc;
	kstat_named_t	kmc_depot_free;
	kstat_named_t	kmc_depot_contention;
	kstat_named_t	kmc_slab_alloc;
	kstat_named_t	kmc_slab_free;
	kstat_named_t	kmc_buf_constructed;
	kstat_named_t	kmc_buf_avail;
	kstat_named_t	kmc_buf_inuse;
	kstat_named_t	kmc_buf_total;
	kstat_named_t	kmc_buf_max;
	kstat_named_t	kmc_slab_create;
	kstat_named_t	kmc_slab_destroy;
	kstat_named_t	kmc_vmem_source;
	kstat_named_t	kmc_hash_size;
	kstat_named_t	kmc_hash_lookup_depth;
	kstat_named_t	kmc_hash_rescale;
	kstat_named_t	kmc_full_magazines;
	kstat_named_t	kmc_empty_magazines;
	kstat_named_t	kmc_magazine_size;
	kstat_named_t	kmc_move_callbacks;
	kstat_named_t	kmc_move_yes;
	kstat_named_t	kmc_move_no;
	kstat_named_t	kmc_move_later;
	kstat_named_t	kmc_move_dont_need;
	kstat_named_t	kmc_move_dont_know;
	kstat_named_t	kmc_move_hunt_found;
} kmem_cache_kstat = {
	{ "buf_size",		KSTAT_DATA_UINT64 },
	{ "align",		KSTAT_DATA_UINT64 },
	{ "chunk_size",		KSTAT_DATA_UINT64 },
	{ "slab_size",		KSTAT_DATA_UINT64 },
	{ "alloc",		KSTAT_DATA_UINT64 },
	{ "alloc_fail",		KSTAT_DATA_UINT64 },
	{ "free",		KSTAT_DATA_UINT64 },
	{ "depot_alloc",	KSTAT_DATA_UINT64 },
	{ "depot_free",		KSTAT_DATA_UINT64 },
	{ "depot_contention",	KSTAT_DATA_UINT64 },
	{ "slab_alloc",		KSTAT_DATA_UINT64 },
	{ "slab_free",		KSTAT_DATA_UINT64 },
	{ "buf_constructed",	KSTAT_DATA_UINT64 },
	{ "buf_avail",		KSTAT_DATA_UINT64 },
	{ "buf_inuse",		KSTAT_DATA_UINT64 },
	{ "buf_total",		KSTAT_DATA_UINT64 },
	{ "buf_max",		KSTAT_DATA_UINT64 },
	{ "slab_create",	KSTAT_DATA_UINT64 },
	{ "slab_destroy",	KSTAT_DATA_UINT64 },
	{ "vmem_source",	KSTAT_DATA_UINT64 },
	{ "hash_size",		KSTAT_DATA_UINT64 },
	{ "hash_lookup_depth",	KSTAT_DATA_UINT64 },
	{ "hash_rescale",	KSTAT_DATA_UINT64 },
	{ "full_magazines",	KSTAT_DATA_UINT64 },
	{ "empty_magazines",	KSTAT_DATA_UINT64 },
	{ "magazine_size",	KSTAT_DATA_UINT64 },
	{ "move_callbacks",	KSTAT_DATA_UINT64 },
	{ "move_yes",		KSTAT_DATA_UINT64 },
	{ "move_no",		KSTAT_DATA_UINT64 },
	{ "move_later",		KSTAT_DATA_UINT64 },
	{ "move_dont_need",	KSTAT_DATA_UINT64 },
	{ "move_dont_know",	KSTAT_DATA_UINT64 },
	{ "move_hunt_found",	KSTAT_DATA_UINT64 },
};

static kmutex_t kmem_cache_kstat_lock;

/*
 * The default set of caches to back kmem_alloc().
 * These sizes should be reevaluated periodically.
 *
 * We want allocations that are multiples of the coherency granularity
 * (64 bytes) to be satisfied from a cache which is a multiple of 64
 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
 * the next kmem_cache_size greater than or equal to it must be a
 * multiple of 64.
 */
static const int kmem_alloc_sizes[] = {
	1 * 8,
	2 * 8,
	3 * 8,
	4 * 8,		5 * 8,		6 * 8,		7 * 8,
	4 * 16,		5 * 16,		6 * 16,		7 * 16,
	4 * 32,		5 * 32,		6 * 32,		7 * 32,
	4 * 64,		5 * 64,		6 * 64,		7 * 64,
	4 * 128,	5 * 128,	6 * 128,	7 * 128,
	P2ALIGN(8192 / 7, 64),
	P2ALIGN(8192 / 6, 64),
	P2ALIGN(8192 / 5, 64),
	P2ALIGN(8192 / 4, 64),
	P2ALIGN(8192 / 3, 64),
	P2ALIGN(8192 / 2, 64),
	P2ALIGN(8192 / 1, 64),
	4096 * 3,
	8192 * 2,
	8192 * 3,
	8192 * 4,
};

#define	KMEM_MAXBUF	32768

static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];

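/*
 * Magazine types, selected by buffer size at cache creation time. The columns
 * correspond to the kmem_magtype_t fields (see <sys/kmem_impl.h> for the
 * precise semantics): magazine size (rounds), alignment, and the minimum and
 * maximum buffer sizes served by each type.
 */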
947static kmem_magtype_t kmem_magtype[] = {
948	{ 1,	8,	3200,	65536	},
949	{ 3,	16,	256,	32768	},
950	{ 7,	32,	64,	16384	},
951	{ 15,	64,	0,	8192	},
952	{ 31,	64,	0,	4096	},
953	{ 47,	64,	0,	2048	},
954	{ 63,	64,	0,	1024	},
955	{ 95,	64,	0,	512	},
956	{ 143,	64,	0,	0	},
957};
958
959static uint32_t kmem_reaping;
960static uint32_t kmem_reaping_idspace;
961
962/*
963 * kmem tunables
964 */
965clock_t kmem_reap_interval;	/* cache reaping rate [15 * HZ ticks] */
966int kmem_depot_contention = 3;	/* max failed tryenters per real interval */
967pgcnt_t kmem_reapahead = 0;	/* start reaping N pages before pageout */
968int kmem_panic = 1;		/* whether to panic on error */
969int kmem_logging = 1;		/* kmem_log_enter() override */
970uint32_t kmem_mtbf = 0;		/* mean time between failures [default: off] */
971size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
972size_t kmem_content_log_size;	/* content log size [2% of memory] */
973size_t kmem_failure_log_size;	/* failure log [4 pages per CPU] */
974size_t kmem_slab_log_size;	/* slab create log [4 pages per CPU] */
975size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
976size_t kmem_lite_minsize = 0;	/* minimum buffer size for KMF_LITE */
977size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
978int kmem_lite_pcs = 4;		/* number of PCs to store in KMF_LITE mode */
979size_t kmem_maxverify;		/* maximum bytes to inspect in debug routines */
980size_t kmem_minfirewall;	/* hardware-enforced redzone threshold */
981
982#ifdef DEBUG
983int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
984#else
985int kmem_flags = 0;
986#endif
987int kmem_ready;
988
989static kmem_cache_t	*kmem_slab_cache;
990static kmem_cache_t	*kmem_bufctl_cache;
991static kmem_cache_t	*kmem_bufctl_audit_cache;
992
993static kmutex_t		kmem_cache_lock;	/* inter-cache linkage only */
994static list_t		kmem_caches;
995
996static taskq_t		*kmem_taskq;
997static kmutex_t		kmem_flags_lock;
998static vmem_t		*kmem_metadata_arena;
999static vmem_t		*kmem_msb_arena;	/* arena for metadata caches */
1000static vmem_t		*kmem_cache_arena;
1001static vmem_t		*kmem_hash_arena;
1002static vmem_t		*kmem_log_arena;
1003static vmem_t		*kmem_oversize_arena;
1004static vmem_t		*kmem_va_arena;
1005static vmem_t		*kmem_default_arena;
1006static vmem_t		*kmem_firewall_va_arena;
1007static vmem_t		*kmem_firewall_arena;
1008
1009/*
1010 * Define KMEM_STATS to turn on statistic gathering. By default, it is only
1011 * turned on when DEBUG is also defined.
1012 */
1013#ifdef	DEBUG
1014#define	KMEM_STATS
1015#endif	/* DEBUG */
1016
1017#ifdef	KMEM_STATS
1018#define	KMEM_STAT_ADD(stat)			((stat)++)
1019#define	KMEM_STAT_COND_ADD(cond, stat)		((void) (!(cond) || (stat)++))
1020#else
1021#define	KMEM_STAT_ADD(stat)			/* nothing */
1022#define	KMEM_STAT_COND_ADD(cond, stat)		/* nothing */
1023#endif	/* KMEM_STATS */
1024
1025/*
1026 * kmem slab consolidator thresholds (tunables)
1027 */
1028static size_t kmem_frag_minslabs = 101;	/* minimum total slabs */
1029static size_t kmem_frag_numer = 1;	/* free buffers (numerator) */
1030static size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1031/*
1032 * Maximum number of slabs from which to move buffers during a single
1033 * maintenance interval while the system is not low on memory.
1034 */
1035static size_t kmem_reclaim_max_slabs = 1;
1036/*
1037 * Number of slabs to scan backwards from the end of the partial slab list
1038 * when searching for buffers to relocate.
1039 */
1040static size_t kmem_reclaim_scan_range = 12;
1041
1042#ifdef	KMEM_STATS
1043static struct {
1044	uint64_t kms_callbacks;
1045	uint64_t kms_yes;
1046	uint64_t kms_no;
1047	uint64_t kms_later;
1048	uint64_t kms_dont_need;
1049	uint64_t kms_dont_know;
1050	uint64_t kms_hunt_found_slab;
1051	uint64_t kms_hunt_found_mag;
1052	uint64_t kms_hunt_notfound;
1053	uint64_t kms_hunt_alloc_fail;
1054	uint64_t kms_hunt_lucky;
1055	uint64_t kms_notify;
1056	uint64_t kms_notify_callbacks;
1057	uint64_t kms_disbelief;
1058	uint64_t kms_already_pending;
1059	uint64_t kms_callback_alloc_fail;
1060	uint64_t kms_endscan_slab_destroyed;
1061	uint64_t kms_endscan_nomem;
1062	uint64_t kms_endscan_slab_all_used;
1063	uint64_t kms_endscan_refcnt_changed;
1064	uint64_t kms_endscan_nomove_changed;
1065	uint64_t kms_endscan_freelist;
1066	uint64_t kms_avl_update;
1067	uint64_t kms_avl_noupdate;
1068	uint64_t kms_no_longer_reclaimable;
1069	uint64_t kms_notify_no_longer_reclaimable;
1070	uint64_t kms_alloc_fail;
1071	uint64_t kms_constructor_fail;
1072	uint64_t kms_dead_slabs_freed;
1073	uint64_t kms_defrags;
1074	uint64_t kms_scan_depot_ws_reaps;
1075	uint64_t kms_debug_reaps;
1076	uint64_t kms_debug_move_scans;
1077} kmem_move_stats;
1078#endif	/* KMEM_STATS */
1079
1080/* consolidator knobs */
1081static boolean_t kmem_move_noreap;
1082static boolean_t kmem_move_blocked;
1083static boolean_t kmem_move_fulltilt;
1084static boolean_t kmem_move_any_partial;
1085
1086#ifdef	DEBUG
1087/*
1088 * Ensure code coverage by occasionally running the consolidator even when the
1089 * caches are not fragmented (they may never be). These intervals are mean time
1090 * in cache maintenance intervals (kmem_cache_update).
1091 */
1092static int kmem_mtb_move = 60;		/* defrag 1 slab (~15min) */
1093static int kmem_mtb_reap = 1800;	/* defrag all slabs (~7.5hrs) */
1094#endif	/* DEBUG */
1095
1096static kmem_cache_t	*kmem_defrag_cache;
1097static kmem_cache_t	*kmem_move_cache;
1098static taskq_t		*kmem_move_taskq;
1099
1100static void kmem_cache_scan(kmem_cache_t *);
1101static void kmem_cache_defrag(kmem_cache_t *);
1102
1104kmem_log_header_t	*kmem_transaction_log;
1105kmem_log_header_t	*kmem_content_log;
1106kmem_log_header_t	*kmem_failure_log;
1107kmem_log_header_t	*kmem_slab_log;
1108
1109static int		kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1110
1111#define	KMEM_BUFTAG_LITE_ENTER(bt, count, caller)			\
1112	if ((count) > 0) {						\
1113		pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history;	\
1114		pc_t *_e;						\
1115		/* memmove() the old entries down one notch */		\
1116		for (_e = &_s[(count) - 1]; _e > _s; _e--)		\
1117			*_e = *(_e - 1);				\
1118		*_s = (uintptr_t)(caller);				\
1119	}
1120
1121#define	KMERR_MODIFIED	0	/* buffer modified while on freelist */
1122#define	KMERR_REDZONE	1	/* redzone violation (write past end of buf) */
1123#define	KMERR_DUPFREE	2	/* freed a buffer twice */
1124#define	KMERR_BADADDR	3	/* freed a bad (unallocated) address */
1125#define	KMERR_BADBUFTAG	4	/* buftag corrupted */
1126#define	KMERR_BADBUFCTL	5	/* bufctl corrupted */
1127#define	KMERR_BADCACHE	6	/* freed a buffer to the wrong cache */
1128#define	KMERR_BADSIZE	7	/* alloc size != free size */
1129#define	KMERR_BADBASE	8	/* buffer base address wrong */
1130
1131struct {
1132	hrtime_t	kmp_timestamp;	/* timestamp of panic */
1133	int		kmp_error;	/* type of kmem error */
1134	void		*kmp_buffer;	/* buffer that induced panic */
1135	void		*kmp_realbuf;	/* real start address for buffer */
1136	kmem_cache_t	*kmp_cache;	/* buffer's cache according to client */
1137	kmem_cache_t	*kmp_realcache;	/* actual cache containing buffer */
1138	kmem_slab_t	*kmp_slab;	/* slab according to kmem_findslab() */
1139	kmem_bufctl_t	*kmp_bufctl;	/* bufctl */
1140} kmem_panic_info;
1142
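/*
 * Pattern utilities for the debugging code below: fill a buffer with a
 * 64-bit pattern, find the first word that deviates from a pattern, or
 * verify one pattern while replacing it with another.
 */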
1143static void
1144copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1145{
1146	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1147	uint64_t *buf = buf_arg;
1148
1149	while (buf < bufend)
1150		*buf++ = pattern;
1151}
1152
1153static void *
1154verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1155{
1156	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1157	uint64_t *buf;
1158
1159	for (buf = buf_arg; buf < bufend; buf++)
1160		if (*buf != pattern)
1161			return (buf);
1162	return (NULL);
1163}
1164
1165static void *
1166verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1167{
1168	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1169	uint64_t *buf;
1170
1171	for (buf = buf_arg; buf < bufend; buf++) {
1172		if (*buf != old) {
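			/*
			 * Mismatch: restore the old pattern over the words
			 * already rewritten, leaving the buffer as it was
			 * found for the caller's error reporting.
			 */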
1173			copy_pattern(old, buf_arg,
1174			    (char *)buf - (char *)buf_arg);
1175			return (buf);
1176		}
1177		*buf = new;
1178	}
1179
1180	return (NULL);
1181}
1182
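/*
 * Apply func to each cache on the global cache list, either directly or
 * via the given taskq.  kmem_cache_applyall_id() below does the same for
 * identifier (KMC_IDENTIFIER) caches only.
 */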
1183static void
1184kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1185{
1186	kmem_cache_t *cp;
1187
1188	mutex_enter(&kmem_cache_lock);
1189	for (cp = list_head(&kmem_caches); cp != NULL;
1190	    cp = list_next(&kmem_caches, cp))
1191		if (tq != NULL)
1192			(void) taskq_dispatch(tq, (task_func_t *)func, cp,
1193			    tqflag);
1194		else
1195			func(cp);
1196	mutex_exit(&kmem_cache_lock);
1197}
1198
1199static void
1200kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1201{
1202	kmem_cache_t *cp;
1203
1204	mutex_enter(&kmem_cache_lock);
1205	for (cp = list_head(&kmem_caches); cp != NULL;
1206	    cp = list_next(&kmem_caches, cp)) {
1207		if (!(cp->cache_cflags & KMC_IDENTIFIER))
1208			continue;
1209		if (tq != NULL)
1210			(void) taskq_dispatch(tq, (task_func_t *)func, cp,
1211			    tqflag);
1212		else
1213			func(cp);
1214	}
1215	mutex_exit(&kmem_cache_lock);
1216}
1217
1218/*
1219 * Debugging support.  Given a buffer address, find its slab.
1220 */
1221static kmem_slab_t *
1222kmem_findslab(kmem_cache_t *cp, void *buf)
1223{
1224	kmem_slab_t *sp;
1225
1226	mutex_enter(&cp->cache_lock);
1227	for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1228	    sp = list_next(&cp->cache_complete_slabs, sp)) {
1229		if (KMEM_SLAB_MEMBER(sp, buf)) {
1230			mutex_exit(&cp->cache_lock);
1231			return (sp);
1232		}
1233	}
1234	for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1235	    sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1236		if (KMEM_SLAB_MEMBER(sp, buf)) {
1237			mutex_exit(&cp->cache_lock);
1238			return (sp);
1239		}
1240	}
1241	mutex_exit(&cp->cache_lock);
1242
1243	return (NULL);
1244}
1245
1246static void
1247kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1248{
1249	kmem_buftag_t *btp = NULL;
1250	kmem_bufctl_t *bcp = NULL;
1251	kmem_cache_t *cp = cparg;
1252	kmem_slab_t *sp;
1253	uint64_t *off;
1254	void *buf = bufarg;
1255
1256	kmem_logging = 0;	/* stop logging when a bad thing happens */
1257
1258	kmem_panic_info.kmp_timestamp = gethrtime();
1259
1260	sp = kmem_findslab(cp, buf);
1261	if (sp == NULL) {
1262		for (cp = list_tail(&kmem_caches); cp != NULL;
1263		    cp = list_prev(&kmem_caches, cp)) {
1264			if ((sp = kmem_findslab(cp, buf)) != NULL)
1265				break;
1266		}
1267	}
1268
1269	if (sp == NULL) {
1270		cp = NULL;
1271		error = KMERR_BADADDR;
1272	} else {
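		/*
		 * The buffer belongs to some cache.  If it is not the cache
		 * the caller claimed, report that; otherwise round bufarg
		 * down to its chunk base so that a misaligned free shows up
		 * as KMERR_BADBASE.
		 */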
1273		if (cp != cparg)
1274			error = KMERR_BADCACHE;
1275		else
1276			buf = (char *)bufarg - ((uintptr_t)bufarg -
1277			    (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1278		if (buf != bufarg)
1279			error = KMERR_BADBASE;
1280		if (cp->cache_flags & KMF_BUFTAG)
1281			btp = KMEM_BUFTAG(cp, buf);
1282		if (cp->cache_flags & KMF_HASH) {
1283			mutex_enter(&cp->cache_lock);
1284			for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1285				if (bcp->bc_addr == buf)
1286					break;
1287			mutex_exit(&cp->cache_lock);
1288			if (bcp == NULL && btp != NULL)
1289				bcp = btp->bt_bufctl;
1290			if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1291			    NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1292			    bcp->bc_addr != buf) {
1293				error = KMERR_BADBUFCTL;
1294				bcp = NULL;
1295			}
1296		}
1297	}
1298
1299	kmem_panic_info.kmp_error = error;
1300	kmem_panic_info.kmp_buffer = bufarg;
1301	kmem_panic_info.kmp_realbuf = buf;
1302	kmem_panic_info.kmp_cache = cparg;
1303	kmem_panic_info.kmp_realcache = cp;
1304	kmem_panic_info.kmp_slab = sp;
1305	kmem_panic_info.kmp_bufctl = bcp;
1306
1307	printf("kernel memory allocator: ");
1308
1309	switch (error) {
1310
1311	case KMERR_MODIFIED:
1312		printf("buffer modified after being freed\n");
1313		off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1314		if (off == NULL)	/* shouldn't happen */
1315			off = buf;
1316		printf("modification occurred at offset 0x%lx "
1317		    "(0x%llx replaced by 0x%llx)\n",
1318		    (uintptr_t)off - (uintptr_t)buf,
1319		    (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1320		break;
1321
1322	case KMERR_REDZONE:
1323		printf("redzone violation: write past end of buffer\n");
1324		break;
1325
1326	case KMERR_BADADDR:
1327		printf("invalid free: buffer not in cache\n");
1328		break;
1329
1330	case KMERR_DUPFREE:
1331		printf("duplicate free: buffer freed twice\n");
1332		break;
1333
1334	case KMERR_BADBUFTAG:
1335		printf("boundary tag corrupted\n");
1336		printf("bcp ^ bxstat = %lx, should be %lx\n",
1337		    (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1338		    KMEM_BUFTAG_FREE);
1339		break;
1340
1341	case KMERR_BADBUFCTL:
1342		printf("bufctl corrupted\n");
1343		break;
1344
1345	case KMERR_BADCACHE:
1346		printf("buffer freed to wrong cache\n");
1347		printf("buffer was allocated from %s,\n", cp->cache_name);
1348		printf("caller attempting free to %s.\n", cparg->cache_name);
1349		break;
1350
1351	case KMERR_BADSIZE:
1352		printf("bad free: free size (%u) != alloc size (%u)\n",
1353		    KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1354		    KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1355		break;
1356
1357	case KMERR_BADBASE:
1358		printf("bad free: free address (%p) != alloc address (%p)\n",
1359		    bufarg, buf);
1360		break;
1361	}
1362
1363	printf("buffer=%p  bufctl=%p  cache: %s\n",
1364	    bufarg, (void *)bcp, cparg->cache_name);
1365
1366	if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1367	    error != KMERR_BADBUFCTL) {
1368		int d;
1369		timestruc_t ts;
1370		kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1371
1372		hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1373		printf("previous transaction on buffer %p:\n", buf);
1374		printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
1375		    (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1376		    (void *)sp, cp->cache_name);
1377		for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1378			ulong_t off;
1379			char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1380			printf("%s+%lx\n", sym ? sym : "?", off);
1381		}
1382	}
1383	if (kmem_panic > 0)
1384		panic("kernel heap corruption detected");
1385	if (kmem_panic == 0)
1386		debug_enter(NULL);
1387	kmem_logging = 1;	/* resume logging */
1388}
1389
1390static kmem_log_header_t *
1391kmem_log_init(size_t logsize)
1392{
1393	kmem_log_header_t *lhp;
1394	int nchunks = 4 * max_ncpus;
1395	size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1396	int i;
1397
1398	/*
1399	 * Make sure that lhp->lh_cpu[] is nicely aligned
1400	 * to prevent false sharing of cache lines.
1401	 */
1402	lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1403	lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1404	    NULL, NULL, VM_SLEEP);
1405	bzero(lhp, lhsize);
1406
1407	mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1408	lhp->lh_nchunks = nchunks;
1409	lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1410	lhp->lh_base = vmem_alloc(kmem_log_arena,
1411	    lhp->lh_chunksize * nchunks, VM_SLEEP);
1412	lhp->lh_free = vmem_alloc(kmem_log_arena,
1413	    nchunks * sizeof (int), VM_SLEEP);
1414	bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1415
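	/*
	 * Chunks 0 .. max_ncpus - 1 become the initial per-CPU chunks;
	 * the remaining chunks populate the circular free list.
	 */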
1416	for (i = 0; i < max_ncpus; i++) {
1417		kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1418		mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1419		clhp->clh_chunk = i;
1420	}
1421
1422	for (i = max_ncpus; i < nchunks; i++)
1423		lhp->lh_free[i] = i;
1424
1425	lhp->lh_head = max_ncpus;
1426	lhp->lh_tail = 0;
1427
1428	return (lhp);
1429}
1430
1431static void *
1432kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1433{
1434	void *logspace;
1435	kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1436
1437	if (lhp == NULL || kmem_logging == 0 || panicstr)
1438		return (NULL);
1439
1440	mutex_enter(&clhp->clh_lock);
1441	clhp->clh_hits++;
1442	if (size > clhp->clh_avail) {
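		/*
		 * The current per-CPU chunk is full: return it to the
		 * tail of the global free list and claim a fresh chunk
		 * from the head, all under the global log lock.
		 */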
1443		mutex_enter(&lhp->lh_lock);
1444		lhp->lh_hits++;
1445		lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1446		lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1447		clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1448		lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1449		clhp->clh_current = lhp->lh_base +
1450		    clhp->clh_chunk * lhp->lh_chunksize;
1451		clhp->clh_avail = lhp->lh_chunksize;
1452		if (size > lhp->lh_chunksize)
1453			size = lhp->lh_chunksize;
1454		mutex_exit(&lhp->lh_lock);
1455	}
1456	logspace = clhp->clh_current;
1457	clhp->clh_current += size;
1458	clhp->clh_avail -= size;
1459	bcopy(data, logspace, size);
1460	mutex_exit(&clhp->clh_lock);
1461	return (logspace);
1462}
1463
1464#define	KMEM_AUDIT(lp, cp, bcp)						\
1465{									\
1466	kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp);	\
1467	_bcp->bc_timestamp = gethrtime();				\
1468	_bcp->bc_thread = curthread;					\
1469	_bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH);	\
1470	_bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp));	\
1471}
1472
1473static void
1474kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1475	kmem_slab_t *sp, void *addr)
1476{
1477	kmem_bufctl_audit_t bca;
1478
1479	bzero(&bca, sizeof (kmem_bufctl_audit_t));
1480	bca.bc_addr = addr;
1481	bca.bc_slab = sp;
1482	bca.bc_cache = cp;
1483	KMEM_AUDIT(lp, cp, &bca);
1484}
1485
1486/*
1487 * Create a new slab for cache cp.
1488 */
1489static kmem_slab_t *
1490kmem_slab_create(kmem_cache_t *cp, int kmflag)
1491{
1492	size_t slabsize = cp->cache_slabsize;
1493	size_t chunksize = cp->cache_chunksize;
1494	int cache_flags = cp->cache_flags;
1495	size_t color, chunks;
1496	char *buf, *slab;
1497	kmem_slab_t *sp;
1498	kmem_bufctl_t *bcp;
1499	vmem_t *vmp = cp->cache_arena;
1500
1501	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1502
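	/*
	 * Cycle the slab's starting offset (color) so that buffers from
	 * successive slabs are distributed across cache lines.
	 */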
1503	color = cp->cache_color + cp->cache_align;
1504	if (color > cp->cache_maxcolor)
1505		color = cp->cache_mincolor;
1506	cp->cache_color = color;
1507
1508	slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1509
1510	if (slab == NULL)
1511		goto vmem_alloc_failure;
1512
1513	ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1514
1515	/*
1516	 * Reverify what was already checked in kmem_cache_set_move(), since the
1517	 * consolidator depends (for correctness) on slabs being initialized
1518	 * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1519	 * clients to distinguish uninitialized memory from known objects).
1520	 */
1521	ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1522	if (!(cp->cache_cflags & KMC_NOTOUCH))
1523		copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1524
1525	if (cache_flags & KMF_HASH) {
1526		if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1527			goto slab_alloc_failure;
1528		chunks = (slabsize - color) / chunksize;
1529	} else {
1530		sp = KMEM_SLAB(cp, slab);
1531		chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1532	}
1533
1534	sp->slab_cache	= cp;
1535	sp->slab_head	= NULL;
1536	sp->slab_refcnt	= 0;
1537	sp->slab_base	= buf = slab + color;
1538	sp->slab_chunks	= chunks;
1539	sp->slab_stuck_offset = (uint32_t)-1;
1540	sp->slab_later_count = 0;
1541	sp->slab_flags = 0;
1542
1543	ASSERT(chunks > 0);
1544	while (chunks-- != 0) {
1545		if (cache_flags & KMF_HASH) {
1546			bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1547			if (bcp == NULL)
1548				goto bufctl_alloc_failure;
1549			if (cache_flags & KMF_AUDIT) {
1550				kmem_bufctl_audit_t *bcap =
1551				    (kmem_bufctl_audit_t *)bcp;
1552				bzero(bcap, sizeof (kmem_bufctl_audit_t));
1553				bcap->bc_cache = cp;
1554			}
1555			bcp->bc_addr = buf;
1556			bcp->bc_slab = sp;
1557		} else {
1558			bcp = KMEM_BUFCTL(cp, buf);
1559		}
1560		if (cache_flags & KMF_BUFTAG) {
1561			kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1562			btp->bt_redzone = KMEM_REDZONE_PATTERN;
1563			btp->bt_bufctl = bcp;
1564			btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1565			if (cache_flags & KMF_DEADBEEF) {
1566				copy_pattern(KMEM_FREE_PATTERN, buf,
1567				    cp->cache_verify);
1568			}
1569		}
1570		bcp->bc_next = sp->slab_head;
1571		sp->slab_head = bcp;
1572		buf += chunksize;
1573	}
1574
1575	kmem_log_event(kmem_slab_log, cp, sp, slab);
1576
1577	return (sp);
1578
1579bufctl_alloc_failure:
1580
1581	while ((bcp = sp->slab_head) != NULL) {
1582		sp->slab_head = bcp->bc_next;
1583		kmem_cache_free(cp->cache_bufctl_cache, bcp);
1584	}
1585	kmem_cache_free(kmem_slab_cache, sp);
1586
1587slab_alloc_failure:
1588
1589	vmem_free(vmp, slab, slabsize);
1590
1591vmem_alloc_failure:
1592
1593	kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1594	atomic_add_64(&cp->cache_alloc_fail, 1);
1595
1596	return (NULL);
1597}
1598
1599/*
1600 * Destroy a slab.
1601 */
1602static void
1603kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1604{
1605	vmem_t *vmp = cp->cache_arena;
1606	void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1607
1608	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1609	ASSERT(sp->slab_refcnt == 0);
1610
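	/*
	 * For non-hashed caches, the slab header and bufctls live inside
	 * the slab memory itself and disappear with the vmem_free() below;
	 * only hashed caches allocate them separately.
	 */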
1611	if (cp->cache_flags & KMF_HASH) {
1612		kmem_bufctl_t *bcp;
1613		while ((bcp = sp->slab_head) != NULL) {
1614			sp->slab_head = bcp->bc_next;
1615			kmem_cache_free(cp->cache_bufctl_cache, bcp);
1616		}
1617		kmem_cache_free(kmem_slab_cache, sp);
1618	}
1619	vmem_free(vmp, slab, cp->cache_slabsize);
1620}
1621
1622static void *
1623kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp)
1624{
1625	kmem_bufctl_t *bcp, **hash_bucket;
1626	void *buf;
1627
1628	ASSERT(MUTEX_HELD(&cp->cache_lock));
1629	/*
1630	 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1631	 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1632	 * slab is newly created (sp->slab_refcnt == 0).
1633	 */
1634	ASSERT((sp->slab_refcnt == 0) || (KMEM_SLAB_IS_PARTIAL(sp) &&
1635	    (sp == avl_first(&cp->cache_partial_slabs))));
1636	ASSERT(sp->slab_cache == cp);
1637
1638	cp->cache_slab_alloc++;
1639	cp->cache_bufslab--;
1640	sp->slab_refcnt++;
1641
1642	bcp = sp->slab_head;
1643	if ((sp->slab_head = bcp->bc_next) == NULL) {
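		/*
		 * We just took the slab's last free buffer, so it moves to
		 * the complete list.  A slab whose refcnt is now 1 is a
		 * single-chunk slab that was never in the partial tree.
		 */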
1644		ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1645		if (sp->slab_refcnt == 1) {
1646			ASSERT(sp->slab_chunks == 1);
1647		} else {
1648			ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1649			avl_remove(&cp->cache_partial_slabs, sp);
1650			sp->slab_later_count = 0; /* clear history */
1651			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1652			sp->slab_stuck_offset = (uint32_t)-1;
1653		}
1654		list_insert_head(&cp->cache_complete_slabs, sp);
1655		cp->cache_complete_slab_count++;
1656	} else {
1657		ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1658		if (sp->slab_refcnt == 1) {
1659			avl_add(&cp->cache_partial_slabs, sp);
1660		} else {
1661			/*
1662			 * The slab is now more allocated than it was, so the
1663			 * order remains unchanged.
1664			 */
1665			ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1666		}
1667	}
1668
1669	if (cp->cache_flags & KMF_HASH) {
1670		/*
1671		 * Add buffer to allocated-address hash table.
1672		 */
1673		buf = bcp->bc_addr;
1674		hash_bucket = KMEM_HASH(cp, buf);
1675		bcp->bc_next = *hash_bucket;
1676		*hash_bucket = bcp;
1677		if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1678			KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1679		}
1680	} else {
1681		buf = KMEM_BUF(cp, bcp);
1682	}
1683
1684	ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1685	return (buf);
1686}
1687
1688/*
1689 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1690 */
1691static void *
1692kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1693{
1694	kmem_slab_t *sp;
1695	void *buf;
1696
1697	mutex_enter(&cp->cache_lock);
1698	sp = avl_first(&cp->cache_partial_slabs);
1699	if (sp == NULL) {
1700		ASSERT(cp->cache_bufslab == 0);
1701
1702		/*
1703		 * The freelist is empty.  Create a new slab.
1704		 */
1705		mutex_exit(&cp->cache_lock);
1706		if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1707			return (NULL);
1708		}
1709		mutex_enter(&cp->cache_lock);
1710		cp->cache_slab_create++;
1711		if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1712			cp->cache_bufmax = cp->cache_buftotal;
1713		cp->cache_bufslab += sp->slab_chunks;
1714	}
1715
1716	buf = kmem_slab_alloc_impl(cp, sp);
1717	ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1718	    (cp->cache_complete_slab_count +
1719	    avl_numnodes(&cp->cache_partial_slabs) +
1720	    (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1721	mutex_exit(&cp->cache_lock);
1722
1723	return (buf);
1724}
1725
1726static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1727
1728/*
1729 * Free a raw (unconstructed) buffer to cp's slab layer.
1730 */
1731static void
1732kmem_slab_free(kmem_cache_t *cp, void *buf)
1733{
1734	kmem_slab_t *sp;
1735	kmem_bufctl_t *bcp, **prev_bcpp;
1736
1737	ASSERT(buf != NULL);
1738
1739	mutex_enter(&cp->cache_lock);
1740	cp->cache_slab_free++;
1741
1742	if (cp->cache_flags & KMF_HASH) {
1743		/*
1744		 * Look up buffer in allocated-address hash table.
1745		 */
1746		prev_bcpp = KMEM_HASH(cp, buf);
1747		while ((bcp = *prev_bcpp) != NULL) {
1748			if (bcp->bc_addr == buf) {
1749				*prev_bcpp = bcp->bc_next;
1750				sp = bcp->bc_slab;
1751				break;
1752			}
1753			cp->cache_lookup_depth++;
1754			prev_bcpp = &bcp->bc_next;
1755		}
1756	} else {
1757		bcp = KMEM_BUFCTL(cp, buf);
1758		sp = KMEM_SLAB(cp, buf);
1759	}
1760
1761	if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1762		mutex_exit(&cp->cache_lock);
1763		kmem_error(KMERR_BADADDR, cp, buf);
1764		return;
1765	}
1766
1767	if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1768		/*
1769		 * If this is the buffer that prevented the consolidator from
1770		 * clearing the slab, we can reset the slab flags now that the
1771		 * buffer is freed. (It makes sense to do this in
1772		 * kmem_cache_free(), where the client gives up ownership of the
1773		 * buffer, but on the hot path the test is too expensive.)
1774		 */
1775		kmem_slab_move_yes(cp, sp, buf);
1776	}
1777
1778	if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1779		if (cp->cache_flags & KMF_CONTENTS)
1780			((kmem_bufctl_audit_t *)bcp)->bc_contents =
1781			    kmem_log_enter(kmem_content_log, buf,
1782			    cp->cache_contents);
1783		KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1784	}
1785
1786	bcp->bc_next = sp->slab_head;
1787	sp->slab_head = bcp;
1788
1789	cp->cache_bufslab++;
1790	ASSERT(sp->slab_refcnt >= 1);
1791
1792	if (--sp->slab_refcnt == 0) {
1793		/*
1794		 * There are no outstanding allocations from this slab,
1795		 * so we can reclaim the memory.
1796		 */
1797		if (sp->slab_chunks == 1) {
1798			list_remove(&cp->cache_complete_slabs, sp);
1799			cp->cache_complete_slab_count--;
1800		} else {
1801			avl_remove(&cp->cache_partial_slabs, sp);
1802		}
1803
1804		cp->cache_buftotal -= sp->slab_chunks;
1805		cp->cache_bufslab -= sp->slab_chunks;
1806		/*
1807		 * Defer releasing the slab to the virtual memory subsystem
1808		 * while there is a pending move callback, since we guarantee
1809		 * that buffers passed to the move callback have only been
1810		 * touched by kmem or by the client itself. Since the memory
1811		 * patterns baddcafe (uninitialized) and deadbeef (freed) both
1812		 * set at least one of the two lowest order bits, the client can
1813		 * test those bits in the move callback to determine whether or
1814		 * not it knows about the buffer (assuming that the client also
1815		 * sets one of those low order bits whenever it frees a buffer).
1816		 */
1817		if (cp->cache_defrag == NULL ||
1818		    (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1819		    !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1820			cp->cache_slab_destroy++;
1821			mutex_exit(&cp->cache_lock);
1822			kmem_slab_destroy(cp, sp);
1823		} else {
1824			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1825			/*
1826			 * Slabs are inserted at both ends of the deadlist to
1827			 * distinguish between slabs freed while move callbacks
1828			 * are pending (list head) and a slab freed while the
1829			 * lock is dropped in kmem_move_buffers() (list tail) so
1830			 * that in both cases kmem_slab_destroy() is called
1831			 * from the right context.
1832			 */
1833			if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1834				list_insert_tail(deadlist, sp);
1835			} else {
1836				list_insert_head(deadlist, sp);
1837			}
1838			cp->cache_defrag->kmd_deadcount++;
1839			mutex_exit(&cp->cache_lock);
1840		}
1841		return;
1842	}
1843
1844	if (bcp->bc_next == NULL) {
1845		/* Transition the slab from completely allocated to partial. */
1846		ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1847		ASSERT(sp->slab_chunks > 1);
1848		list_remove(&cp->cache_complete_slabs, sp);
1849		cp->cache_complete_slab_count--;
1850		avl_add(&cp->cache_partial_slabs, sp);
1851	} else {
1852#ifdef	DEBUG
1853		if (avl_update_gt(&cp->cache_partial_slabs, sp)) {
1854			KMEM_STAT_ADD(kmem_move_stats.kms_avl_update);
1855		} else {
1856			KMEM_STAT_ADD(kmem_move_stats.kms_avl_noupdate);
1857		}
1858#else
1859		(void) avl_update_gt(&cp->cache_partial_slabs, sp);
1860#endif
1861	}
1862
1863	ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1864	    (cp->cache_complete_slab_count +
1865	    avl_numnodes(&cp->cache_partial_slabs) +
1866	    (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1867	mutex_exit(&cp->cache_lock);
1868}
1869
1870/*
1871 * Return -1 if kmem_error() was called, 1 on constructor failure, 0 on success.
1872 */
1873static int
1874kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1875    caddr_t caller)
1876{
1877	kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1878	kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1879	uint32_t mtbf;
1880
1881	if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1882		kmem_error(KMERR_BADBUFTAG, cp, buf);
1883		return (-1);
1884	}
1885
1886	btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1887
1888	if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1889		kmem_error(KMERR_BADBUFCTL, cp, buf);
1890		return (-1);
1891	}
1892
1893	if (cp->cache_flags & KMF_DEADBEEF) {
1894		if (!construct && (cp->cache_flags & KMF_LITE)) {
1895			if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1896				kmem_error(KMERR_MODIFIED, cp, buf);
1897				return (-1);
1898			}
1899			if (cp->cache_constructor != NULL)
1900				*(uint64_t *)buf = btp->bt_redzone;
1901			else
1902				*(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1903		} else {
1904			construct = 1;
1905			if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1906			    KMEM_UNINITIALIZED_PATTERN, buf,
1907			    cp->cache_verify)) {
1908				kmem_error(KMERR_MODIFIED, cp, buf);
1909				return (-1);
1910			}
1911		}
1912	}
1913	btp->bt_redzone = KMEM_REDZONE_PATTERN;
1914
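	/*
	 * Fault injection: a nonzero mtbf (mean time between failures)
	 * value deliberately fails KM_NOSLEEP allocations at roughly
	 * that rate.
	 */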
1915	if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1916	    gethrtime() % mtbf == 0 &&
1917	    (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1918		kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1919		if (!construct && cp->cache_destructor != NULL)
1920			cp->cache_destructor(buf, cp->cache_private);
1921	} else {
1922		mtbf = 0;
1923	}
1924
1925	if (mtbf || (construct && cp->cache_constructor != NULL &&
1926	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1927		atomic_add_64(&cp->cache_alloc_fail, 1);
1928		btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1929		if (cp->cache_flags & KMF_DEADBEEF)
1930			copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1931		kmem_slab_free(cp, buf);
1932		return (1);
1933	}
1934
1935	if (cp->cache_flags & KMF_AUDIT) {
1936		KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1937	}
1938
1939	if ((cp->cache_flags & KMF_LITE) &&
1940	    !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
1941		KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
1942	}
1943
1944	return (0);
1945}
1946
1947static int
1948kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
1949{
1950	kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1951	kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1952	kmem_slab_t *sp;
1953
1954	if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
1955		if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1956			kmem_error(KMERR_DUPFREE, cp, buf);
1957			return (-1);
1958		}
1959		sp = kmem_findslab(cp, buf);
1960		if (sp == NULL || sp->slab_cache != cp)
1961			kmem_error(KMERR_BADADDR, cp, buf);
1962		else
1963			kmem_error(KMERR_REDZONE, cp, buf);
1964		return (-1);
1965	}
1966
1967	btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1968
1969	if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1970		kmem_error(KMERR_BADBUFCTL, cp, buf);
1971		return (-1);
1972	}
1973
1974	if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
1975		kmem_error(KMERR_REDZONE, cp, buf);
1976		return (-1);
1977	}
1978
1979	if (cp->cache_flags & KMF_AUDIT) {
1980		if (cp->cache_flags & KMF_CONTENTS)
1981			bcp->bc_contents = kmem_log_enter(kmem_content_log,
1982			    buf, cp->cache_contents);
1983		KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1984	}
1985
1986	if ((cp->cache_flags & KMF_LITE) &&
1987	    !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
1988		KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
1989	}
1990
1991	if (cp->cache_flags & KMF_DEADBEEF) {
1992		if (cp->cache_flags & KMF_LITE)
1993			btp->bt_redzone = *(uint64_t *)buf;
1994		else if (cp->cache_destructor != NULL)
1995			cp->cache_destructor(buf, cp->cache_private);
1996
1997		copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1998	}
1999
2000	return (0);
2001}
2002
2003/*
2004 * Free each object in magazine mp to cp's slab layer, and free mp itself.
2005 */
2006static void
2007kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2008{
2009	int round;
2010
2011	ASSERT(!list_link_active(&cp->cache_link) ||
2012	    taskq_member(kmem_taskq, curthread));
2013
2014	for (round = 0; round < nrounds; round++) {
2015		void *buf = mp->mag_round[round];
2016
2017		if (cp->cache_flags & KMF_DEADBEEF) {
2018			if (verify_pattern(KMEM_FREE_PATTERN, buf,
2019			    cp->cache_verify) != NULL) {
2020				kmem_error(KMERR_MODIFIED, cp, buf);
2021				continue;
2022			}
2023			if ((cp->cache_flags & KMF_LITE) &&
2024			    cp->cache_destructor != NULL) {
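				/*
				 * KMF_LITE saved the buffer's first word
				 * in bt_redzone at free time; restore it
				 * so the destructor sees the constructed
				 * object, then reapply the free pattern.
				 */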
2025				kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2026				*(uint64_t *)buf = btp->bt_redzone;
2027				cp->cache_destructor(buf, cp->cache_private);
2028				*(uint64_t *)buf = KMEM_FREE_PATTERN;
2029			}
2030		} else if (cp->cache_destructor != NULL) {
2031			cp->cache_destructor(buf, cp->cache_private);
2032		}
2033
2034		kmem_slab_free(cp, buf);
2035	}
2036	ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2037	kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2038}
2039
2040/*
2041 * Allocate a magazine from the depot.
2042 */
2043static kmem_magazine_t *
2044kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2045{
2046	kmem_magazine_t *mp;
2047
2048	/*
2049	 * If we can't get the depot lock without contention,
2050	 * update our contention count.  We use the depot
2051	 * contention rate to determine whether we need to
2052	 * increase the magazine size for better scalability.
2053	 */
2054	if (!mutex_tryenter(&cp->cache_depot_lock)) {
2055		mutex_enter(&cp->cache_depot_lock);
2056		cp->cache_depot_contention++;
2057	}
2058
2059	if ((mp = mlp->ml_list) != NULL) {
2060		ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2061		mlp->ml_list = mp->mag_next;
2062		if (--mlp->ml_total < mlp->ml_min)
2063			mlp->ml_min = mlp->ml_total;
2064		mlp->ml_alloc++;
2065	}
2066
2067	mutex_exit(&cp->cache_depot_lock);
2068
2069	return (mp);
2070}
2071
2072/*
2073 * Free a magazine to the depot.
2074 */
2075static void
2076kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2077{
2078	mutex_enter(&cp->cache_depot_lock);
2079	ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2080	mp->mag_next = mlp->ml_list;
2081	mlp->ml_list = mp;
2082	mlp->ml_total++;
2083	mutex_exit(&cp->cache_depot_lock);
2084}
2085
2086/*
2087 * Update the working set statistics for cp's depot.
2088 */
2089static void
2090kmem_depot_ws_update(kmem_cache_t *cp)
2091{
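	/*
	 * ml_min tracks the smallest magazine count seen during the
	 * current interval; magazines below that floor were never needed,
	 * so it becomes the next interval's reap limit.
	 */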
2092	mutex_enter(&cp->cache_depot_lock);
2093	cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2094	cp->cache_full.ml_min = cp->cache_full.ml_total;
2095	cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2096	cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2097	mutex_exit(&cp->cache_depot_lock);
2098}
2099
2100/*
2101 * Reap all magazines that have fallen out of the depot's working set.
2102 */
2103static void
2104kmem_depot_ws_reap(kmem_cache_t *cp)
2105{
2106	long reap;
2107	kmem_magazine_t *mp;
2108
2109	ASSERT(!list_link_active(&cp->cache_link) ||
2110	    taskq_member(kmem_taskq, curthread));
2111
2112	reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2113	while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL)
2114		kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2115
2116	reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2117	while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL)
2118		kmem_magazine_destroy(cp, mp, 0);
2119}
2120
2121static void
2122kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2123{
2124	ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2125	    (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2126	ASSERT(ccp->cc_magsize > 0);
2127
2128	ccp->cc_ploaded = ccp->cc_loaded;
2129	ccp->cc_prounds = ccp->cc_rounds;
2130	ccp->cc_loaded = mp;
2131	ccp->cc_rounds = rounds;
2132}
2133
2134/*
2135 * Allocate a constructed object from cache cp.
2136 */
2137void *
2138kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2139{
2140	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2141	kmem_magazine_t *fmp;
2142	void *buf;
2143
2144	mutex_enter(&ccp->cc_lock);
2145	for (;;) {
2146		/*
2147		 * If there's an object available in the current CPU's
2148		 * loaded magazine, just take it and return.
2149		 */
2150		if (ccp->cc_rounds > 0) {
2151			buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2152			ccp->cc_alloc++;
2153			mutex_exit(&ccp->cc_lock);
2154			if ((ccp->cc_flags & KMF_BUFTAG) &&
2155			    kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2156			    caller()) != 0) {
2157				if (kmflag & KM_NOSLEEP)
2158					return (NULL);
2159				mutex_enter(&ccp->cc_lock);
2160				continue;
2161			}
2162			return (buf);
2163		}
2164
2165		/*
2166		 * The loaded magazine is empty.  If the previously loaded
2167		 * magazine was full, exchange them and try again.
2168		 */
2169		if (ccp->cc_prounds > 0) {
2170			kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2171			continue;
2172		}
2173
2174		/*
2175		 * If the magazine layer is disabled, break out now.
2176		 */
2177		if (ccp->cc_magsize == 0)
2178			break;
2179
2180		/*
2181		 * Try to get a full magazine from the depot.
2182		 */
2183		fmp = kmem_depot_alloc(cp, &cp->cache_full);
2184		if (fmp != NULL) {
2185			if (ccp->cc_ploaded != NULL)
2186				kmem_depot_free(cp, &cp->cache_empty,
2187				    ccp->cc_ploaded);
2188			kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2189			continue;
2190		}
2191
2192		/*
2193		 * There are no full magazines in the depot,
2194		 * so fall through to the slab layer.
2195		 */
2196		break;
2197	}
2198	mutex_exit(&ccp->cc_lock);
2199
2200	/*
2201	 * We couldn't allocate a constructed object from the magazine layer,
2202	 * so get a raw buffer from the slab layer and apply its constructor.
2203	 */
2204	buf = kmem_slab_alloc(cp, kmflag);
2205
2206	if (buf == NULL)
2207		return (NULL);
2208
2209	if (cp->cache_flags & KMF_BUFTAG) {
2210		/*
2211		 * Make kmem_cache_alloc_debug() apply the constructor for us.
2212		 */
2213		int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2214		if (rc != 0) {
2215			if (kmflag & KM_NOSLEEP)
2216				return (NULL);
2217			/*
2218			 * kmem_cache_alloc_debug() detected corruption but
2219			 * didn't panic (kmem_panic <= 0).  A constructor
2220			 * failure (return code 1) is impossible here, since
2221			 * the KM_NOSLEEP case returned above.  Try again.
2222			 */
2223			ASSERT(rc == -1);
2224			return (kmem_cache_alloc(cp, kmflag));
2225		}
2226		return (buf);
2227	}
2228
2229	if (cp->cache_constructor != NULL &&
2230	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2231		atomic_add_64(&cp->cache_alloc_fail, 1);
2232		kmem_slab_free(cp, buf);
2233		return (NULL);
2234	}
2235
2236	return (buf);
2237}
2238
2239/*
2240 * The freed argument tells whether or not kmem_cache_free_debug() has already
2241 * been called so that we can avoid the duplicate free error. For example, a
2242 * buffer on a magazine has already been freed by the client but is still
2243 * constructed.
2244 */
2245static void
2246kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2247{
2248	if (!freed && (cp->cache_flags & KMF_BUFTAG))
2249		if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2250			return;
2251
2252	/*
2253	 * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2254	 * kmem_cache_free_debug() will have already applied the destructor.
2255	 */
2256	if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2257	    cp->cache_destructor != NULL) {
2258		if (cp->cache_flags & KMF_DEADBEEF) {	/* KMF_LITE implied */
2259			kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2260			*(uint64_t *)buf = btp->bt_redzone;
2261			cp->cache_destructor(buf, cp->cache_private);
2262			*(uint64_t *)buf = KMEM_FREE_PATTERN;
2263		} else {
2264			cp->cache_destructor(buf, cp->cache_private);
2265		}
2266	}
2267
2268	kmem_slab_free(cp, buf);
2269}
2270
2271/*
2272 * Free a constructed object to cache cp.
2273 */
2274void
2275kmem_cache_free(kmem_cache_t *cp, void *buf)
2276{
2277	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2278	kmem_magazine_t *emp;
2279	kmem_magtype_t *mtp;
2280
2281	/*
2282	 * The client must not free either of the buffers passed to the move
2283	 * callback function.
2284	 */
2285	ASSERT(cp->cache_defrag == NULL ||
2286	    cp->cache_defrag->kmd_thread != curthread ||
2287	    (buf != cp->cache_defrag->kmd_from_buf &&
2288	    buf != cp->cache_defrag->kmd_to_buf));
2289
2290	if (ccp->cc_flags & KMF_BUFTAG)
2291		if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2292			return;
2293
2294	mutex_enter(&ccp->cc_lock);
2295	for (;;) {
2296		/*
2297		 * If there's a slot available in the current CPU's
2298		 * loaded magazine, just put the object there and return.
2299		 */
2300		if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2301			ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2302			ccp->cc_free++;
2303			mutex_exit(&ccp->cc_lock);
2304			return;
2305		}
2306
2307		/*
2308		 * The loaded magazine is full.  If the previously loaded
2309		 * magazine was empty, exchange them and try again.
2310		 */
2311		if (ccp->cc_prounds == 0) {
2312			kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2313			continue;
2314		}
2315
2316		/*
2317		 * If the magazine layer is disabled, break out now.
2318		 */
2319		if (ccp->cc_magsize == 0)
2320			break;
2321
2322		/*
2323		 * Try to get an empty magazine from the depot.
2324		 */
2325		emp = kmem_depot_alloc(cp, &cp->cache_empty);
2326		if (emp != NULL) {
2327			if (ccp->cc_ploaded != NULL)
2328				kmem_depot_free(cp, &cp->cache_full,
2329				    ccp->cc_ploaded);
2330			kmem_cpu_reload(ccp, emp, 0);
2331			continue;
2332		}
2333
2334		/*
2335		 * There are no empty magazines in the depot,
2336		 * so try to allocate a new one.  We must drop all locks
2337		 * across kmem_cache_alloc() because lower layers may
2338		 * attempt to allocate from this cache.
2339		 */
2340		mtp = cp->cache_magtype;
2341		mutex_exit(&ccp->cc_lock);
2342		emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2343		mutex_enter(&ccp->cc_lock);
2344
2345		if (emp != NULL) {
2346			/*
2347			 * We successfully allocated an empty magazine.
2348			 * However, we had to drop ccp->cc_lock to do it,
2349			 * so the cache's magazine size may have changed.
2350			 * If so, free the magazine and try again.
2351			 */
2352			if (ccp->cc_magsize != mtp->mt_magsize) {
2353				mutex_exit(&ccp->cc_lock);
2354				kmem_cache_free(mtp->mt_cache, emp);
2355				mutex_enter(&ccp->cc_lock);
2356				continue;
2357			}
2358
2359			/*
2360			 * We got a magazine of the right size.  Add it to
2361			 * the depot and try the whole dance again.
2362			 */
2363			kmem_depot_free(cp, &cp->cache_empty, emp);
2364			continue;
2365		}
2366
2367		/*
2368		 * We couldn't allocate an empty magazine,
2369		 * so fall through to the slab layer.
2370		 */
2371		break;
2372	}
2373	mutex_exit(&ccp->cc_lock);
2374
2375	/*
2376	 * We couldn't free our constructed object to the magazine layer,
2377	 * so apply its destructor and free it to the slab layer.
2378	 */
2379	kmem_slab_free_constructed(cp, buf, B_TRUE);
2380}
2381
2382void *
2383kmem_zalloc(size_t size, int kmflag)
2384{
2385	size_t index = (size - 1) >> KMEM_ALIGN_SHIFT;
2386	void *buf;
2387
2388	if (index < KMEM_MAXBUF >> KMEM_ALIGN_SHIFT) {
2389		kmem_cache_t *cp = kmem_alloc_table[index];
2390		buf = kmem_cache_alloc(cp, kmflag);
2391		if (buf != NULL) {
2392			if (cp->cache_flags & KMF_BUFTAG) {
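				/*
				 * Record a redzone byte just past the
				 * requested size and encode that size in
				 * the buftag so kmem_free() can verify
				 * both.
				 */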
2393				kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2394				((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2395				((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2396
2397				if (cp->cache_flags & KMF_LITE) {
2398					KMEM_BUFTAG_LITE_ENTER(btp,
2399					    kmem_lite_count, caller());
2400				}
2401			}
2402			bzero(buf, size);
2403		}
2404	} else {
2405		buf = kmem_alloc(size, kmflag);
2406		if (buf != NULL)
2407			bzero(buf, size);
2408	}
2409	return (buf);
2410}
2411
2412void *
2413kmem_alloc(size_t size, int kmflag)
2414{
2415	size_t index = (size - 1) >> KMEM_ALIGN_SHIFT;
2416	void *buf;
2417
2418	if (index < KMEM_MAXBUF >> KMEM_ALIGN_SHIFT) {
2419		kmem_cache_t *cp = kmem_alloc_table[index];
2420		buf = kmem_cache_alloc(cp, kmflag);
2421		if ((cp->cache_flags & KMF_BUFTAG) && buf != NULL) {
2422			kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2423			((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2424			((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2425
2426			if (cp->cache_flags & KMF_LITE) {
2427				KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
2428				    caller());
2429			}
2430		}
2431		return (buf);
2432	}
2433	if (size == 0)
2434		return (NULL);
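	/*
	 * Requests larger than KMEM_MAXBUF bypass the caches and go
	 * directly to the oversize arena.
	 */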
2435	buf = vmem_alloc(kmem_oversize_arena, size, kmflag & KM_VMFLAGS);
2436	if (buf == NULL)
2437		kmem_log_event(kmem_failure_log, NULL, NULL, (void *)size);
2438	return (buf);
2439}
2440
2441void
2442kmem_free(void *buf, size_t size)
2443{
2444	size_t index = (size - 1) >> KMEM_ALIGN_SHIFT;
2445
2446	if (index < KMEM_MAXBUF >> KMEM_ALIGN_SHIFT) {
2447		kmem_cache_t *cp = kmem_alloc_table[index];
2448		if (cp->cache_flags & KMF_BUFTAG) {
2449			kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2450			uint32_t *ip = (uint32_t *)btp;
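			/*
			 * ip[1] holds the size encoded at allocation time.
			 * On mismatch: a free pattern at the buffer start
			 * means a duplicate free; an otherwise valid
			 * encoded size means a wrong-size free (record the
			 * free size in ip[0] for the error report);
			 * anything else is a trashed redzone.
			 */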
2451			if (ip[1] != KMEM_SIZE_ENCODE(size)) {
2452				if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
2453					kmem_error(KMERR_DUPFREE, cp, buf);
2454					return;
2455				}
2456				if (KMEM_SIZE_VALID(ip[1])) {
2457					ip[0] = KMEM_SIZE_ENCODE(size);
2458					kmem_error(KMERR_BADSIZE, cp, buf);
2459				} else {
2460					kmem_error(KMERR_REDZONE, cp, buf);
2461				}
2462				return;
2463			}
2464			if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
2465				kmem_error(KMERR_REDZONE, cp, buf);
2466				return;
2467			}
2468			btp->bt_redzone = KMEM_REDZONE_PATTERN;
2469			if (cp->cache_flags & KMF_LITE) {
2470				KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
2471				    caller());
2472			}
2473		}
2474		kmem_cache_free(cp, buf);
2475	} else {
2476		if (buf == NULL && size == 0)
2477			return;
2478		vmem_free(kmem_oversize_arena, buf, size);
2479	}
2480}
2481
2482void *
2483kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2484{
2485	size_t realsize = size + vmp->vm_quantum;
2486	void *addr;
2487
2488	/*
2489	 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
2490	 * vm_quantum will cause integer wraparound.  Check for this, and
2491	 * blow off the firewall page in this case.  Note that such a
2492	 * giant allocation (the entire kernel address space) can never
2493	 * be satisfied, so it will either fail immediately (VM_NOSLEEP)
2494	 * or sleep forever (VM_SLEEP).  Thus, there is no need for a
2495	 * corresponding check in kmem_firewall_va_free().
2496	 */
2497	if (realsize < size)
2498		realsize = size;
2499
2500	/*
2501	 * While boot still owns resource management, make sure that this
2502	 * redzone virtual address allocation is properly accounted for in
2503	 * OBP's "virtual-memory" "available" lists because we're
2504	 * effectively claiming them for a red zone.  If we don't do this,
2505	 * the available lists become too fragmented and too large for the
2506	 * current boot/kernel memory list interface.
2507	 */
2508	addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
2509
2510	if (addr != NULL && kvseg.s_base == NULL && realsize != size)
2511		(void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
2512
2513	return (addr);
2514}
2515
2516void
2517kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
2518{
2519	ASSERT((kvseg.s_base == NULL ?
2520	    va_to_pfn((char *)addr + size) :
2521	    hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
2522
2523	vmem_free(vmp, addr, size + vmp->vm_quantum);
2524}
2525
2526/*
2527 * Try to allocate at least `size' bytes of memory without sleeping or
2528 * panicking. Return actual allocated size in `asize'. If allocation failed,
2529 * try final allocation with sleep or panic allowed.
2530 */
2531void *
2532kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
2533{
2534	void *p;
2535
2536	*asize = P2ROUNDUP(size, KMEM_ALIGN);
2537	do {
2538		p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
2539		if (p != NULL)
2540			return (p);
2541		*asize += KMEM_ALIGN;
2542	} while (*asize <= PAGESIZE);
2543
2544	*asize = P2ROUNDUP(size, KMEM_ALIGN);
2545	return (kmem_alloc(*asize, kmflag));
2546}
2547
2548/*
2549 * Reclaim all unused memory from a cache.
2550 */
2551static void
2552kmem_cache_reap(kmem_cache_t *cp)
2553{
2554	ASSERT(taskq_member(kmem_taskq, curthread));
2555
2556	/*
2557	 * Ask the cache's owner to free some memory if possible.
2558	 * The idea is to handle things like the inode cache, which
2559	 * typically sits on a bunch of memory that it doesn't truly
2560	 * *need*.  Reclaim policy is entirely up to the owner; this
2561	 * callback is just an advisory plea for help.
2562	 */
2563	if (cp->cache_reclaim != NULL) {
2564		long delta;
2565
2566		/*
2567		 * Reclaimed memory should be reapable (not included in the
2568		 * depot's working set).
2569		 */
2570		delta = cp->cache_full.ml_total;
2571		cp->cache_reclaim(cp->cache_private);
2572		delta = cp->cache_full.ml_total - delta;
2573		if (delta > 0) {
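			/*
			 * Credit the magazines freed by the reclaim
			 * callback to this interval's reap limit so that
			 * kmem_depot_ws_reap() below can release them
			 * immediately.
			 */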
2574			mutex_enter(&cp->cache_depot_lock);
2575			cp->cache_full.ml_reaplimit += delta;
2576			cp->cache_full.ml_min += delta;
2577			mutex_exit(&cp->cache_depot_lock);
2578		}
2579	}
2580
2581	kmem_depot_ws_reap(cp);
2582
2583	if (cp->cache_defrag != NULL && !kmem_move_noreap) {
2584		kmem_cache_defrag(cp);
2585	}
2586}
2587
2588static void
2589kmem_reap_timeout(void *flag_arg)
2590{
2591	uint32_t *flag = (uint32_t *)flag_arg;
2592
2593	ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
2594	*flag = 0;
2595}
2596
2597static void
2598kmem_reap_done(void *flag)
2599{
2600	(void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
2601}
2602
2603static void
2604kmem_reap_start(void *flag)
2605{
2606	ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
2607
2608	if (flag == &kmem_reaping) {
2609		kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
2610		/*
2611		 * if we have segkp under heap, reap segkp cache.
2612		 */
2613		if (segkp_fromheap)
2614			segkp_cache_free();
2615	}
2616	} else {
2617		kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
2618	}
2619	/*
2620	 * We use taskq_dispatch() to schedule a timeout to clear
2621	 * the flag so that kmem_reap() becomes self-throttling:
2622	 * we won't reap again until the current reap completes *and*
2623	 * at least kmem_reap_interval ticks have elapsed.
2624	 */
2625	if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
2626		kmem_reap_done(flag);
2627}
2628
2629static void
2630kmem_reap_common(void *flag_arg)
2631{
2632	uint32_t *flag = (uint32_t *)flag_arg;
2633
2634	if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
2635	    cas32(flag, 0, 1) != 0)
2636		return;
2637
2638	/*
2639	 * It may not be kosher to do memory allocation when a reap is called
2640	 * (for example, if vmem_populate() is in the call chain).
2641	 * So we start the reap going with a TQ_NOALLOC dispatch.  If the
2642	 * dispatch fails, we reset the flag, and the next reap will try again.
2643	 */
2644	if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
2645		*flag = 0;
2646}
2647
2648/*
2649 * Reclaim all unused memory from all caches.  Called from the VM system
2650 * when memory gets tight.
2651 */
2652void
2653kmem_reap(void)
2654{
2655	kmem_reap_common(&kmem_reaping);
2656}
2657
2658/*
2659 * Reclaim all unused memory from identifier arenas, called when a vmem
2660 * arena not backed by memory is exhausted.  Since reaping memory-backed caches
2661 * cannot help with identifier exhaustion, we avoid both a large amount of
2662 * work and unwanted side-effects from reclaim callbacks.
2663 */
2664void
2665kmem_reap_idspace(void)
2666{
2667	kmem_reap_common(&kmem_reaping_idspace);
2668}
2669
2670/*
2671 * Purge all magazines from a cache and set its magazine limit to zero.
2672 * All calls are serialized by the kmem_taskq lock, except for the final
2673 * call from kmem_cache_destroy().
2674 */
2675static void
2676kmem_cache_magazine_purge(kmem_cache_t *cp)
2677{
2678	kmem_cpu_cache_t *ccp;
2679	kmem_magazine_t *mp, *pmp;
2680	int rounds, prounds, cpu_seqid;
2681
2682	ASSERT(!list_link_active(&cp->cache_link) ||
2683	    taskq_member(kmem_taskq, curthread));
2684	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
2685
2686	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
2687		ccp = &cp->cache_cpu[cpu_seqid];
2688
2689		mutex_enter(&ccp->cc_lock);
2690		mp = ccp->cc_loaded;
2691		pmp = ccp->cc_ploaded;
2692		rounds = ccp->cc_rounds;
2693		prounds = ccp->cc_prounds;
2694		ccp->cc_loaded = NULL;
2695		ccp->cc_ploaded = NULL;
2696		ccp->cc_rounds = -1;
2697		ccp->cc_prounds = -1;
2698		ccp->cc_magsize = 0;
2699		mutex_exit(&ccp->cc_lock);
2700
2701		if (mp)
2702			kmem_magazine_destroy(cp, mp, rounds);
2703		if (pmp)
2704			kmem_magazine_destroy(cp, pmp, prounds);
2705	}
2706
2707	/*
2708	 * Updating the working set statistics twice in a row has the
2709	 * effect of setting the working set size to zero, so everything
2710	 * is eligible for reaping.
2711	 */
2712	kmem_depot_ws_update(cp);
2713	kmem_depot_ws_update(cp);
2714
2715	kmem_depot_ws_reap(cp);
2716}
2717
2718/*
2719 * Enable per-cpu magazines on a cache.
2720 */
2721static void
2722kmem_cache_magazine_enable(kmem_cache_t *cp)
2723{
2724	int cpu_seqid;
2725
2726	if (cp->cache_flags & KMF_NOMAGAZINE)
2727		return;
2728
2729	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
2730		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2731		mutex_enter(&ccp->cc_lock);
2732		ccp->cc_magsize = cp->cache_magtype->mt_magsize;
2733		mutex_exit(&ccp->cc_lock);
2734	}
2736}
2737
2738/*
2739 * Reap (almost) everything right now.  See kmem_cache_magazine_purge()
2740 * for explanation of the back-to-back kmem_depot_ws_update() calls.
2741 */
2742void
2743kmem_cache_reap_now(kmem_cache_t *cp)
2744{
2745	ASSERT(list_link_active(&cp->cache_link));
2746
2747	kmem_depot_ws_update(cp);
2748	kmem_depot_ws_update(cp);
2749
2750	(void) taskq_dispatch(kmem_taskq,
2751	    (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
2752	taskq_wait(kmem_taskq);
2753}
2754
2755/*
2756 * Recompute a cache's magazine size.  The trade-off is that larger magazines
2757 * provide a higher transfer rate with the depot, while smaller magazines
2758 * reduce memory consumption.  Magazine resizing is an expensive operation;
2759 * it should not be done frequently.
2760 *
2761 * Changes to the magazine size are serialized by the kmem_taskq lock.
2762 *
2763 * Note: at present this only grows the magazine size.  It might be useful
2764 * to allow shrinkage too.
2765 */
2766static void
2767kmem_cache_magazine_resize(kmem_cache_t *cp)
2768{
2769	kmem_magtype_t *mtp = cp->cache_magtype;
2770
2771	ASSERT(taskq_member(kmem_taskq, curthread));
2772
2773	if (cp->cache_chunksize < mtp->mt_maxbuf) {
2774		kmem_cache_magazine_purge(cp);
2775		mutex_enter(&cp->cache_depot_lock);
2776		cp->cache_magtype = ++mtp;
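		/*
		 * Bias the contention baseline far above the current
		 * count so that the next maintenance pass cannot
		 * immediately trigger another resize.
		 */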
2777		cp->cache_depot_contention_prev =
2778		    cp->cache_depot_contention + INT_MAX;
2779		mutex_exit(&cp->cache_depot_lock);
2780		kmem_cache_magazine_enable(cp);
2781	}
2782}
2783
2784/*
2785 * Rescale a cache's hash table, so that the table size is roughly the
2786 * cache size.  We want the average lookup time to be extremely small.
2787 */
2788static void
2789kmem_hash_rescale(kmem_cache_t *cp)
2790{
2791	kmem_bufctl_t **old_table, **new_table, *bcp;
2792	size_t old_size, new_size, h;
2793
2794	ASSERT(taskq_member(kmem_taskq, curthread));
2795
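	/*
	 * Pick a power-of-two table size roughly equal to the number of
	 * allocated buffers, and only bother rescaling if the current
	 * table is off by more than a factor of two.
	 */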
2796	new_size = MAX(KMEM_HASH_INITIAL,
2797	    1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
2798	old_size = cp->cache_hash_mask + 1;
2799
2800	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
2801		return;
2802
2803	new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
2804	    VM_NOSLEEP);
2805	if (new_table == NULL)
2806		return;
2807	bzero(new_table, new_size * sizeof (void *));
2808
2809	mutex_enter(&cp->cache_lock);
2810
2811	old_size = cp->cache_hash_mask + 1;
2812	old_table = cp->cache_hash_table;
2813
2814	cp->cache_hash_mask = new_size - 1;
2815	cp->cache_hash_table = new_table;
2816	cp->cache_rescale++;
2817
2818	for (h = 0; h < old_size; h++) {
2819		bcp = old_table[h];
2820		while (bcp != NULL) {
2821			void *addr = bcp->bc_addr;
2822			kmem_bufctl_t *next_bcp = bcp->bc_next;
2823			kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
2824			bcp->bc_next = *hash_bucket;
2825			*hash_bucket = bcp;
2826			bcp = next_bcp;
2827		}
2828	}
2829
2830	mutex_exit(&cp->cache_lock);
2831
2832	vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
2833}
2834
2835/*
2836 * Perform periodic maintenance on a cache: hash rescaling, depot working-set
2837 * update, magazine resizing, and slab consolidation.
2838 */
2839static void
2840kmem_cache_update(kmem_cache_t *cp)
2841{
2842	int need_hash_rescale = 0;
2843	int need_magazine_resize = 0;
2844
2845	ASSERT(MUTEX_HELD(&kmem_cache_lock));
2846
2847	/*
2848	 * If the cache has become much larger or smaller than its hash table,
2849	 * fire off a request to rescale the hash table.
2850	 */
2851	mutex_enter(&cp->cache_lock);
2852
2853	if ((cp->cache_flags & KMF_HASH) &&
2854	    (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
2855	    (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
2856	    cp->cache_hash_mask > KMEM_HASH_INITIAL)))
2857		need_hash_rescale = 1;
2858
2859	mutex_exit(&cp->cache_lock);
2860
2861	/*
2862	 * Update the depot working set statistics.
2863	 */
2864	kmem_depot_ws_update(cp);
2865
2866	/*
2867	 * If there's a lot of contention in the depot,
2868	 * increase the magazine size.
2869	 */
2870	mutex_enter(&cp->cache_depot_lock);
2871
2872	if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
2873	    (int)(cp->cache_depot_contention -
2874	    cp->cache_depot_contention_prev) > kmem_depot_contention)
2875		need_magazine_resize = 1;
2876
2877	cp->cache_depot_contention_prev = cp->cache_depot_contention;
2878
2879	mutex_exit(&cp->cache_depot_lock);
2880
2881	if (need_hash_rescale)
2882		(void) taskq_dispatch(kmem_taskq,
2883		    (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
2884
2885	if (need_magazine_resize)
2886		(void) taskq_dispatch(kmem_taskq,
2887		    (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
2888
2889	if (cp->cache_defrag != NULL)
2890		(void) taskq_dispatch(kmem_taskq,
2891		    (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
2892}
2893
2894static void
2895kmem_update_timeout(void *dummy)
2896{
2897	static void kmem_update(void *);
2898
2899	(void) timeout(kmem_update, dummy, kmem_reap_interval);
2900}
2901
2902static void
2903kmem_update(void *dummy)
2904{
2905	kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
2906
2907	/*
2908	 * We use taskq_dispatch() to reschedule the timeout so that
2909	 * kmem_update() becomes self-throttling: it won't schedule
2910	 * new tasks until all previous tasks have completed.
2911	 */
2912	if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP))
2913		kmem_update_timeout(NULL);
2914}
2915
2916static int
2917kmem_cache_kstat_update(kstat_t *ksp, int rw)
2918{
2919	struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
2920	kmem_cache_t *cp = ksp->ks_private;
2921	uint64_t cpu_buf_avail;
2922	uint64_t buf_avail = 0;
2923	int cpu_seqid;
2924
2925	ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
2926
2927	if (rw == KSTAT_WRITE)
2928		return (EACCES);
2929
2930	mutex_enter(&cp->cache_lock);
2931
2932	kmcp->kmc_alloc_fail.value.ui64		= cp->cache_alloc_fail;
2933	kmcp->kmc_alloc.value.ui64		= cp->cache_slab_alloc;
2934	kmcp->kmc_free.value.ui64		= cp->cache_slab_free;
2935	kmcp->kmc_slab_alloc.value.ui64		= cp->cache_slab_alloc;
2936	kmcp->kmc_slab_free.value.ui64		= cp->cache_slab_free;
2937
2938	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
2939		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2940
2941		mutex_enter(&ccp->cc_lock);
2942
2943		cpu_buf_avail = 0;
2944		if (ccp->cc_rounds > 0)
2945			cpu_buf_avail += ccp->cc_rounds;
2946		if (ccp->cc_prounds > 0)
2947			cpu_buf_avail += ccp->cc_prounds;
2948
2949		kmcp->kmc_alloc.value.ui64	+= ccp->cc_alloc;
2950		kmcp->kmc_free.value.ui64	+= ccp->cc_free;
2951		buf_avail			+= cpu_buf_avail;
2952
2953		mutex_exit(&ccp->cc_lock);
2954	}
2955
2956	mutex_enter(&cp->cache_depot_lock);
2957
2958	kmcp->kmc_depot_alloc.value.ui64	= cp->cache_full.ml_alloc;
2959	kmcp->kmc_depot_free.value.ui64		= cp->cache_empty.ml_alloc;
2960	kmcp->kmc_depot_contention.value.ui64	= cp->cache_depot_contention;
2961	kmcp->kmc_full_magazines.value.ui64	= cp->cache_full.ml_total;
2962	kmcp->kmc_empty_magazines.value.ui64	= cp->cache_empty.ml_total;
2963	kmcp->kmc_magazine_size.value.ui64	=
2964	    (cp->cache_flags & KMF_NOMAGAZINE) ?
2965	    0 : cp->cache_magtype->mt_magsize;
2966
2967	kmcp->kmc_alloc.value.ui64		+= cp->cache_full.ml_alloc;
2968	kmcp->kmc_free.value.ui64		+= cp->cache_empty.ml_alloc;
2969	buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
2970
2971	mutex_exit(&cp->cache_depot_lock);
2972
2973	kmcp->kmc_buf_size.value.ui64	= cp->cache_bufsize;
2974	kmcp->kmc_align.value.ui64	= cp->cache_align;
2975	kmcp->kmc_chunk_size.value.ui64	= cp->cache_chunksize;
2976	kmcp->kmc_slab_size.value.ui64	= cp->cache_slabsize;
2977	kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
2978	buf_avail += cp->cache_bufslab;
2979	kmcp->kmc_buf_avail.value.ui64	= buf_avail;
2980	kmcp->kmc_buf_inuse.value.ui64	= cp->cache_buftotal - buf_avail;
2981	kmcp->kmc_buf_total.value.ui64	= cp->cache_buftotal;
2982	kmcp->kmc_buf_max.value.ui64	= cp->cache_bufmax;
2983	kmcp->kmc_slab_create.value.ui64	= cp->cache_slab_create;
2984	kmcp->kmc_slab_destroy.value.ui64	= cp->cache_slab_destroy;
2985	kmcp->kmc_hash_size.value.ui64	= (cp->cache_flags & KMF_HASH) ?
2986	    cp->cache_hash_mask + 1 : 0;
2987	kmcp->kmc_hash_lookup_depth.value.ui64	= cp->cache_lookup_depth;
2988	kmcp->kmc_hash_rescale.value.ui64	= cp->cache_rescale;
2989	kmcp->kmc_vmem_source.value.ui64	= cp->cache_arena->vm_id;
2990
2991	if (cp->cache_defrag == NULL) {
2992		kmcp->kmc_move_callbacks.value.ui64	= 0;
2993		kmcp->kmc_move_yes.value.ui64		= 0;
2994		kmcp->kmc_move_no.value.ui64		= 0;
2995		kmcp->kmc_move_later.value.ui64		= 0;
2996		kmcp->kmc_move_dont_need.value.ui64	= 0;
2997		kmcp->kmc_move_dont_know.value.ui64	= 0;
2998		kmcp->kmc_move_hunt_found.value.ui64	= 0;
2999	} else {
3000		kmem_defrag_t *kd = cp->cache_defrag;
3001		kmcp->kmc_move_callbacks.value.ui64	= kd->kmd_callbacks;
3002		kmcp->kmc_move_yes.value.ui64		= kd->kmd_yes;
3003		kmcp->kmc_move_no.value.ui64		= kd->kmd_no;
3004		kmcp->kmc_move_later.value.ui64		= kd->kmd_later;
3005		kmcp->kmc_move_dont_need.value.ui64	= kd->kmd_dont_need;
3006		kmcp->kmc_move_dont_know.value.ui64	= kd->kmd_dont_know;
3007		kmcp->kmc_move_hunt_found.value.ui64	= kd->kmd_hunt_found;
3008	}
3009
3010	mutex_exit(&cp->cache_lock);
3011	return (0);
3012}
3013
3014/*
3015 * Return a named statistic about a particular cache.
3016 * This shouldn't be called very often, so it's currently designed for
3017 * simplicity (leverages existing kstat support) rather than efficiency.
3018 */
3019uint64_t
3020kmem_cache_stat(kmem_cache_t *cp, char *name)
3021{
3022	int i;
3023	kstat_t *ksp = cp->cache_kstat;
3024	kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3025	uint64_t value = 0;
3026
3027	if (ksp != NULL) {
3028		mutex_enter(&kmem_cache_kstat_lock);
3029		(void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3030		for (i = 0; i < ksp->ks_ndata; i++) {
3031			if (strcmp(knp[i].name, name) == 0) {
3032				value = knp[i].value.ui64;
3033				break;
3034			}
3035		}
3036		mutex_exit(&kmem_cache_kstat_lock);
3037	}
3038	return (value);
3039}
3040
3041/*
3042 * Return an estimate of currently available kernel heap memory.
3043 * On 32-bit systems, physical memory may exceed virtual memory, so
3044 * we just truncate the result at 1GB.
3045 */
3046size_t
3047kmem_avail(void)
3048{
3049	spgcnt_t rmem = availrmem - tune.t_minarmem;
3050	spgcnt_t fmem = freemem - minfree;
3051
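	/*
	 * Take the smaller of reclaimable and free memory, clamp below at
	 * zero, and cap the result at 1GB: ptob(1 << (30 - PAGESHIFT)) is
	 * exactly 2^30 bytes.
	 */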
3052	return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3053	    1 << (30 - PAGESHIFT))));
3054}
3055
3056/*
3057 * Return the maximum amount of memory that is (in theory) allocatable
3058 * from the heap. This may be used as an estimate only, since there is
3059 * no guarantee that this space will still be available when an allocation
3060 * request is made, nor that it can be allocated in one big request,
3061 * due to kernel heap fragmentation.
3062 */
3063size_t
3064kmem_maxavail(void)
3065{
3066	spgcnt_t pmem = availrmem - tune.t_minarmem;
3067	spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3068
3069	return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3070}
3071
3072/*
3073 * Indicate whether memory-intensive kmem debugging is enabled.
3074 */
3075int
3076kmem_debugging(void)
3077{
3078	return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3079}
3080
3081/* binning function, sorts finely at the two extremes */
3082#define	KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift)				\
3083	((((sp)->slab_refcnt <= (binshift)) ||				\
3084	    (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift)))	\
3085	    ? -(sp)->slab_refcnt					\
3086	    : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
3087
3088/*
3089 * Minimizing the number of partial slabs on the freelist minimizes
3090 * fragmentation (the ratio of unused buffers held by the slab layer). There are
3091 * two ways to get a slab off of the freelist: 1) free all the buffers on the
3092 * slab, and 2) allocate all the buffers on the slab. It follows that we want
3093 * the most-used slabs at the front of the list where they have the best chance
3094 * of being completely allocated, and the least-used slabs at a safe distance
3095 * from the front to improve the odds that the few remaining buffers will all be
3096 * freed before another allocation can tie up the slab. For that reason a slab
3097 * with a higher slab_refcnt sorts less than a slab with a lower
3098 * slab_refcnt.
3099 *
3100 * However, if a slab has at least one buffer that is deemed unfreeable, we
3101 * would rather have that slab at the front of the list regardless of
3102 * slab_refcnt, since even one unfreeable buffer makes the entire slab
3103 * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3104 * callback, the slab is marked unfreeable for as long as it remains on the
3105 * freelist.
3106 */
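/*
 * As a worked example (illustrative numbers only): a cache whose slabs hold
 * 64 chunks gets cache_partial_binshift = highbit(64 / 16) + 1 = 4. A slab
 * with only 2 of its 64 buffers allocated then weighs -2, a nearly full
 * slab with 60 allocated (4 free) weighs -60, and slabs in the broad middle
 * collapse into coarse bins, e.g. 30 allocated weighs -(4 + (30 >> 4)) = -5.
 * Lower weights sort toward the front, so the nearly full slab is first in
 * line to become completely allocated.
 */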
3107static int
3108kmem_partial_slab_cmp(const void *p0, const void *p1)
3109{
3110	const kmem_cache_t *cp;
3111	const kmem_slab_t *s0 = p0;
3112	const kmem_slab_t *s1 = p1;
3113	int w0, w1;
3114	size_t binshift;
3115
3116	ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3117	ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3118	ASSERT(s0->slab_cache == s1->slab_cache);
3119	cp = s1->slab_cache;
3120	ASSERT(MUTEX_HELD(&cp->cache_lock));
3121	binshift = cp->cache_partial_binshift;
3122
3123	/* weight of first slab */
3124	w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3125	if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3126		w0 -= cp->cache_maxchunks;
3127	}
3128
3129	/* weight of second slab */
3130	w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3131	if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3132		w1 -= cp->cache_maxchunks;
3133	}
3134
3135	if (w0 < w1)
3136		return (-1);
3137	if (w0 > w1)
3138		return (1);
3139
3140	/* compare pointer values */
3141	if ((uintptr_t)s0 < (uintptr_t)s1)
3142		return (-1);
3143	if ((uintptr_t)s0 > (uintptr_t)s1)
3144		return (1);
3145
3146	return (0);
3147}
3148
3149static void
3150kmem_check_destructor(kmem_cache_t *cp)
3151{
3152	if (cp->cache_destructor == NULL)
3153		return;
3154
3155	/*
3156	 * Assert that it is valid to call the destructor on a newly constructed
3157	 * object without any intervening client code using the object.
3158	 * Allocate from the slab layer to ensure that the client has not
3159	 * touched the buffer.
3160	 */
3161	void *buf = kmem_slab_alloc(cp, KM_NOSLEEP);
3162	if (buf == NULL)
3163		return;
3164
3165	if (cp->cache_flags & KMF_BUFTAG) {
3166		if (kmem_cache_alloc_debug(cp, buf, KM_NOSLEEP, 1,
3167		    caller()) != 0)
3168			return;
3169	} else if (cp->cache_constructor != NULL &&
3170	    cp->cache_constructor(buf, cp->cache_private, KM_NOSLEEP) != 0) {
3171		atomic_add_64(&cp->cache_alloc_fail, 1);
3172		kmem_slab_free(cp, buf);
3173		return;
3174	}
3175
3176	kmem_slab_free_constructed(cp, buf, B_FALSE);
3177}
3178
3179/*
3180 * It must be valid to call the destructor (if any) on a newly created object.
3181 * That is, the constructor (if any) must leave the object in a valid state for
3182 * the destructor.
3183 */
3184kmem_cache_t *
3185kmem_cache_create(
3186	char *name,		/* descriptive name for this cache */
3187	size_t bufsize,		/* size of the objects it manages */
3188	size_t align,		/* required object alignment */
3189	int (*constructor)(void *, void *, int), /* object constructor */
3190	void (*destructor)(void *, void *),	/* object destructor */
3191	void (*reclaim)(void *), /* memory reclaim callback */
3192	void *private,		/* pass-thru arg for constr/destr/reclaim */
3193	vmem_t *vmp,		/* vmem source for slab allocation */
3194	int cflags)		/* cache creation flags */
3195{
3196	int cpu_seqid;
3197	size_t chunksize;
3198	kmem_cache_t *cp;
3199	kmem_magtype_t *mtp;
3200	size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3201
3202#ifdef	DEBUG
3203	/*
3204	 * Cache names should conform to the rules for valid C identifiers
3205	 */
3206	if (!strident_valid(name)) {
3207		cmn_err(CE_CONT,
3208		    "kmem_cache_create: '%s' is an invalid cache name\n"
3209		    "cache names must conform to the rules for "
3210		    "C identifiers\n", name);
3211	}
3212#endif	/* DEBUG */
3213
3214	if (vmp == NULL)
3215		vmp = kmem_default_arena;
3216
3217	/*
3218	 * If this kmem cache has an identifier vmem arena as its source, mark
3219	 * it such to allow kmem_reap_idspace().
3220	 */
3221	ASSERT(!(cflags & KMC_IDENTIFIER));   /* consumer should not set this */
3222	if (vmp->vm_cflags & VMC_IDENTIFIER)
3223		cflags |= KMC_IDENTIFIER;
3224
3225	/*
3226	 * Get a kmem_cache structure.  We arrange that cp->cache_cpu[]
3227	 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3228	 * false sharing of per-CPU data.
3229	 */
3230	cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3231	    P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3232	bzero(cp, csize);
3233	list_link_init(&cp->cache_link);
3234
3235	if (align == 0)
3236		align = KMEM_ALIGN;
3237
3238	/*
3239	 * If we're not at least KMEM_ALIGN aligned, we can't use free
3240	 * memory to hold bufctl information (because we can't safely
3241	 * perform word loads and stores on it).
3242	 */
3243	if (align < KMEM_ALIGN)
3244		cflags |= KMC_NOTOUCH;
3245
3246	if ((align & (align - 1)) != 0 || align > vmp->vm_quantum)
3247		panic("kmem_cache_create: bad alignment %lu", align);
3248
3249	mutex_enter(&kmem_flags_lock);
3250	if (kmem_flags & KMF_RANDOMIZE)
3251		kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3252		    KMF_RANDOMIZE;
3253	cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3254	mutex_exit(&kmem_flags_lock);
3255
3256	/*
3257	 * Make sure all the various flags are reasonable.
3258	 */
3259	ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3260
3261	if (cp->cache_flags & KMF_LITE) {
3262		if (bufsize >= kmem_lite_minsize &&
3263		    align <= kmem_lite_maxalign &&
3264		    P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3265			cp->cache_flags |= KMF_BUFTAG;
3266			cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3267		} else {
3268			cp->cache_flags &= ~KMF_DEBUG;
3269		}
3270	}
3271
3272	if (cp->cache_flags & KMF_DEADBEEF)
3273		cp->cache_flags |= KMF_REDZONE;
3274
3275	if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3276		cp->cache_flags |= KMF_NOMAGAZINE;
3277
3278	if (cflags & KMC_NODEBUG)
3279		cp->cache_flags &= ~KMF_DEBUG;
3280
3281	if (cflags & KMC_NOTOUCH)
3282		cp->cache_flags &= ~KMF_TOUCH;
3283
3284	if (cflags & KMC_NOHASH)
3285		cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3286
3287	if (cflags & KMC_NOMAGAZINE)
3288		cp->cache_flags |= KMF_NOMAGAZINE;
3289
3290	if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3291		cp->cache_flags |= KMF_REDZONE;
3292
3293	if (!(cp->cache_flags & KMF_AUDIT))
3294		cp->cache_flags &= ~KMF_CONTENTS;
3295
3296	if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3297	    !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3298		cp->cache_flags |= KMF_FIREWALL;
3299
3300	if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3301		cp->cache_flags &= ~KMF_FIREWALL;
3302
3303	if (cp->cache_flags & KMF_FIREWALL) {
3304		cp->cache_flags &= ~KMF_BUFTAG;
3305		cp->cache_flags |= KMF_NOMAGAZINE;
3306		ASSERT(vmp == kmem_default_arena);
3307		vmp = kmem_firewall_arena;
3308	}
3309
3310	/*
3311	 * Set cache properties.
3312	 */
3313	(void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3314	strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3315	cp->cache_bufsize = bufsize;
3316	cp->cache_align = align;
3317	cp->cache_constructor = constructor;
3318	cp->cache_destructor = destructor;
3319	cp->cache_reclaim = reclaim;
3320	cp->cache_private = private;
3321	cp->cache_arena = vmp;
3322	cp->cache_cflags = cflags;
3323
3324	/*
3325	 * Determine the chunk size.
3326	 */
3327	chunksize = bufsize;
3328
3329	if (align >= KMEM_ALIGN) {
3330		chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3331		cp->cache_bufctl = chunksize - KMEM_ALIGN;
3332	}
3333
3334	if (cp->cache_flags & KMF_BUFTAG) {
3335		cp->cache_bufctl = chunksize;
3336		cp->cache_buftag = chunksize;
3337		if (cp->cache_flags & KMF_LITE)
3338			chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3339		else
3340			chunksize += sizeof (kmem_buftag_t);
3341	}
3342
3343	if (cp->cache_flags & KMF_DEADBEEF) {
3344		cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3345		if (cp->cache_flags & KMF_LITE)
3346			cp->cache_verify = sizeof (uint64_t);
3347	}
3348
3349	cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3350
3351	cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
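	/*
	 * For example (default alignment, no debug flags): a 10-byte buffer
	 * with KMEM_ALIGN (8-byte) alignment yields a 16-byte chunk, with
	 * cache_bufctl at offset 8 so that the last word of a free buffer
	 * holds its bufctl linkage.
	 */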
3352
3353	/*
3354	 * Now that we know the chunk size, determine the optimal slab size.
3355	 */
3356	if (vmp == kmem_firewall_arena) {
3357		cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3358		cp->cache_mincolor = cp->cache_slabsize - chunksize;
3359		cp->cache_maxcolor = cp->cache_mincolor;
3360		cp->cache_flags |= KMF_HASH;
3361		ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3362	} else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3363	    !(cp->cache_flags & KMF_AUDIT) &&
3364	    chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3365		cp->cache_slabsize = vmp->vm_quantum;
3366		cp->cache_mincolor = 0;
3367		cp->cache_maxcolor =
3368		    (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3369		ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3370		ASSERT(!(cp->cache_flags & KMF_AUDIT));
3371	} else {
3372		size_t chunks, bestfit, waste, slabsize;
3373		size_t minwaste = LONG_MAX;
3374
3375		for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3376			slabsize = P2ROUNDUP(chunksize * chunks,
3377			    vmp->vm_quantum);
3378			chunks = slabsize / chunksize;
3379			waste = (slabsize % chunksize) / chunks;
3380			if (waste < minwaste) {
3381				minwaste = waste;
3382				bestfit = slabsize;
3383			}
3384		}
3385		if (cflags & KMC_QCACHE)
3386			bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3387		cp->cache_slabsize = bestfit;
3388		cp->cache_mincolor = 0;
3389		cp->cache_maxcolor = bestfit % chunksize;
3390		cp->cache_flags |= KMF_HASH;
3391	}
3392
3393	cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3394	cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3395
3396	if (cp->cache_flags & KMF_HASH) {
3397		ASSERT(!(cflags & KMC_NOHASH));
3398		cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3399		    kmem_bufctl_audit_cache : kmem_bufctl_cache;
3400	}
3401
3402	if (cp->cache_maxcolor >= vmp->vm_quantum)
3403		cp->cache_maxcolor = vmp->vm_quantum - 1;
3404
3405	cp->cache_color = cp->cache_mincolor;
3406
3407	/*
3408	 * Initialize the rest of the slab layer.
3409	 */
3410	mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3411
3412	avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3413	    sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3414	/* LINTED: E_TRUE_LOGICAL_EXPR */
3415	ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3416	/* reuse partial slab AVL linkage for complete slab list linkage */
3417	list_create(&cp->cache_complete_slabs,
3418	    sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3419
3420	if (cp->cache_flags & KMF_HASH) {
3421		cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3422		    KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3423		bzero(cp->cache_hash_table,
3424		    KMEM_HASH_INITIAL * sizeof (void *));
3425		cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3426		cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3427	}
3428
3429	/*
3430	 * Initialize the depot.
3431	 */
3432	mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3433
3434	for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3435		continue;
3436
3437	cp->cache_magtype = mtp;
3438
3439	/*
3440	 * Initialize the CPU layer.
3441	 */
3442	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3443		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3444		mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3445		ccp->cc_flags = cp->cache_flags;
3446		ccp->cc_rounds = -1;
3447		ccp->cc_prounds = -1;
3448	}
3449
3450	/*
3451	 * Create the cache's kstats.
3452	 */
3453	if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3454	    "kmem_cache", KSTAT_TYPE_NAMED,
3455	    sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
3456	    KSTAT_FLAG_VIRTUAL)) != NULL) {
3457		cp->cache_kstat->ks_data = &kmem_cache_kstat;
3458		cp->cache_kstat->ks_update = kmem_cache_kstat_update;
3459		cp->cache_kstat->ks_private = cp;
3460		cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
3461		kstat_install(cp->cache_kstat);
3462	}
3463
3464	/*
3465	 * Add the cache to the global list.  This makes it visible
3466	 * to kmem_update(), so the cache must be ready for business.
3467	 */
3468	mutex_enter(&kmem_cache_lock);
3469	list_insert_tail(&kmem_caches, cp);
3470	mutex_exit(&kmem_cache_lock);
3471
3472	if (kmem_ready)
3473		kmem_cache_magazine_enable(cp);
3474
3475	if (kmem_move_taskq != NULL && cp->cache_destructor != NULL) {
3476		(void) taskq_dispatch(kmem_move_taskq,
3477		    (task_func_t *)kmem_check_destructor, cp,
3478		    TQ_NOSLEEP);
3479	}
3480
3481	return (cp);
3482}
3483
3484static int
3485kmem_move_cmp(const void *buf, const void *p)
3486{
3487	const kmem_move_t *kmm = p;
3488	uintptr_t v1 = (uintptr_t)buf;
3489	uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
3490	return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
3491}
3492
3493static void
3494kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
3495{
3496	kmd->kmd_reclaim_numer = 1;
3497}
3498
3499/*
3500 * Initially, when choosing candidate slabs for buffers to move, we want to be
3501 * very selective and take only slabs that are less than
3502 * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
3503 * slabs, then we raise the allocation ceiling incrementally. The reclaim
3504 * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
3505 * longer fragmented.
3506 */
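/*
 * For example, assuming KMEM_VOID_FRACTION is 8: the initial threshold
 * accepts only slabs that are less than 1/8 allocated, and each adjustment
 * in the positive direction raises the ceiling to 2/8, 3/8, and so on, up
 * to at most 7/8.
 */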
3507static void
3508kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
3509{
3510	if (direction > 0) {
3511		/* make it easier to find a candidate slab */
3512		if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
3513			kmd->kmd_reclaim_numer++;
3514		}
3515	} else {
3516		/* be more selective */
3517		if (kmd->kmd_reclaim_numer > 1) {
3518			kmd->kmd_reclaim_numer--;
3519		}
3520	}
3521}
3522
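/*
 * Register a move callback for the cache. As a hypothetical illustration
 * (object_cache, object_t, and object_move are invented names), a client
 * would register the callback right after creating the cache, before
 * allocating any objects from it:
 *
 *	object_cache = kmem_cache_create("object_cache", sizeof (object_t),
 *	    0, object_ctor, object_dtor, NULL, NULL, NULL, 0);
 *	kmem_cache_set_move(object_cache, object_move);
 */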
3523void
3524kmem_cache_set_move(kmem_cache_t *cp,
3525    kmem_cbrc_t (*move)(void *, void *, size_t, void *))
3526{
3527	kmem_defrag_t *defrag;
3528
3529	ASSERT(move != NULL);
3530	/*
3531	 * The consolidator does not support NOTOUCH caches because kmem cannot
3532	 * initialize their slabs with the 0xbaddcafe memory pattern, which sets
3533	 * a low order bit usable by clients to distinguish uninitialized memory
3534	 * from known objects (see kmem_slab_create).
3535	 */
3536	ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
3537	ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
3538
3539	/*
3540	 * We should not be holding anyone's cache lock when calling
3541	 * kmem_cache_alloc(), so allocate in all cases before acquiring the
3542	 * lock.
3543	 */
3544	defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
3545
3546	mutex_enter(&cp->cache_lock);
3547
3548	if (KMEM_IS_MOVABLE(cp)) {
3549		if (cp->cache_move == NULL) {
3550			/*
3551			 * The client must not have allocated any objects from
3552			 * this cache before setting a move callback function.
3553			 */
3554			ASSERT(cp->cache_bufmax == 0);
3555
3556			cp->cache_defrag = defrag;
3557			defrag = NULL; /* nothing to free */
3558			bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
3559			avl_create(&cp->cache_defrag->kmd_moves_pending,
3560			    kmem_move_cmp, sizeof (kmem_move_t),
3561			    offsetof(kmem_move_t, kmm_entry));
3562			/* LINTED: E_TRUE_LOGICAL_EXPR */
3563			ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3564			/* reuse the slab's AVL linkage for deadlist linkage */
3565			list_create(&cp->cache_defrag->kmd_deadlist,
3566			    sizeof (kmem_slab_t),
3567			    offsetof(kmem_slab_t, slab_link));
3568			kmem_reset_reclaim_threshold(cp->cache_defrag);
3569		}
3570		cp->cache_move = move;
3571	}
3572
3573	mutex_exit(&cp->cache_lock);
3574
3575	if (defrag != NULL) {
3576		kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
3577	}
3578}
3579
3580void
3581kmem_cache_destroy(kmem_cache_t *cp)
3582{
3583	int cpu_seqid;
3584
3585	/*
3586	 * Remove the cache from the global cache list so that no one else
3587	 * can schedule tasks on its behalf, wait for any pending tasks to
3588	 * complete, purge the cache, and then destroy it.
3589	 */
3590	mutex_enter(&kmem_cache_lock);
3591	list_remove(&kmem_caches, cp);
3592	mutex_exit(&kmem_cache_lock);
3593
3594	if (kmem_taskq != NULL)
3595		taskq_wait(kmem_taskq);
3596	if (kmem_move_taskq != NULL)
3597		taskq_wait(kmem_move_taskq);
3598
3599	kmem_cache_magazine_purge(cp);
3600
3601	mutex_enter(&cp->cache_lock);
3602	if (cp->cache_buftotal != 0)
3603		cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
3604		    cp->cache_name, (void *)cp);
3605	if (cp->cache_defrag != NULL) {
3606		avl_destroy(&cp->cache_defrag->kmd_moves_pending);
3607		list_destroy(&cp->cache_defrag->kmd_deadlist);
3608		kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
3609		cp->cache_defrag = NULL;
3610	}
3611	/*
3612	 * The cache is now dead.  There should be no further activity.  We
3613	 * enforce this by setting land mines in the constructor, destructor,
3614	 * reclaim, and move routines that induce a kernel text fault if
3615	 * invoked.
3616	 */
3617	cp->cache_constructor = (int (*)(void *, void *, int))1;
3618	cp->cache_destructor = (void (*)(void *, void *))2;
3619	cp->cache_reclaim = (void (*)(void *))3;
3620	cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
3621	mutex_exit(&cp->cache_lock);
3622
3623	kstat_delete(cp->cache_kstat);
3624
3625	if (cp->cache_hash_table != NULL)
3626		vmem_free(kmem_hash_arena, cp->cache_hash_table,
3627		    (cp->cache_hash_mask + 1) * sizeof (void *));
3628
3629	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
3630		mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
3631
3632	mutex_destroy(&cp->cache_depot_lock);
3633	mutex_destroy(&cp->cache_lock);
3634
3635	vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
3636}
3637
3638/*ARGSUSED*/
3639static int
3640kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
3641{
3642	ASSERT(MUTEX_HELD(&cpu_lock));
3643	if (what == CPU_UNCONFIG) {
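		/*
		 * Purge every cache's magazine layer to flush any rounds
		 * cached on behalf of the departing CPU, then re-enable
		 * magazines for the CPUs that remain.
		 */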
3644		kmem_cache_applyall(kmem_cache_magazine_purge,
3645		    kmem_taskq, TQ_SLEEP);
3646		kmem_cache_applyall(kmem_cache_magazine_enable,
3647		    kmem_taskq, TQ_SLEEP);
3648	}
3649	return (0);
3650}
3651
3652static void
3653kmem_cache_init(int pass, int use_large_pages)
3654{
3655	int i;
3656	size_t size;
3657	kmem_cache_t *cp;
3658	kmem_magtype_t *mtp;
3659	char name[KMEM_CACHE_NAMELEN + 1];
3660
3661	for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
3662		mtp = &kmem_magtype[i];
3663		(void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
3664		mtp->mt_cache = kmem_cache_create(name,
3665		    (mtp->mt_magsize + 1) * sizeof (void *),
3666		    mtp->mt_align, NULL, NULL, NULL, NULL,
3667		    kmem_msb_arena, KMC_NOHASH);
3668	}
3669
3670	kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
3671	    sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
3672	    kmem_msb_arena, KMC_NOHASH);
3673
3674	kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
3675	    sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
3676	    kmem_msb_arena, KMC_NOHASH);
3677
3678	kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
3679	    sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
3680	    kmem_msb_arena, KMC_NOHASH);
3681
3682	if (pass == 2) {
3683		kmem_va_arena = vmem_create("kmem_va",
3684		    NULL, 0, PAGESIZE,
3685		    vmem_alloc, vmem_free, heap_arena,
3686		    8 * PAGESIZE, VM_SLEEP);
3687
3688		if (use_large_pages) {
3689			kmem_default_arena = vmem_xcreate("kmem_default",
3690			    NULL, 0, PAGESIZE,
3691			    segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
3692			    0, VM_SLEEP);
3693		} else {
3694			kmem_default_arena = vmem_create("kmem_default",
3695			    NULL, 0, PAGESIZE,
3696			    segkmem_alloc, segkmem_free, kmem_va_arena,
3697			    0, VM_SLEEP);
3698		}
3699	} else {
3700		/*
3701		 * During the first pass, the kmem_alloc_* caches
3702		 * are treated as metadata.
3703		 */
3704		kmem_default_arena = kmem_msb_arena;
3705	}
3706
3707	/*
3708	 * Set up the default caches to back kmem_alloc()
3709	 */
3710	size = KMEM_ALIGN;
3711	for (i = 0; i < sizeof (kmem_alloc_sizes) / sizeof (int); i++) {
3712		size_t align = KMEM_ALIGN;
3713		size_t cache_size = kmem_alloc_sizes[i];
3714		/*
3715		 * If they allocate a multiple of the coherency granularity,
3716		 * they get a coherency-granularity-aligned address.
3717		 */
3718		if (IS_P2ALIGNED(cache_size, 64))
3719			align = 64;
3720		if (IS_P2ALIGNED(cache_size, PAGESIZE))
3721			align = PAGESIZE;
3722		(void) sprintf(name, "kmem_alloc_%lu", cache_size);
3723		cp = kmem_cache_create(name, cache_size, align,
3724		    NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
3725		while (size <= cache_size) {
3726			kmem_alloc_table[(size - 1) >> KMEM_ALIGN_SHIFT] = cp;
3727			size += KMEM_ALIGN;
3728		}
3729	}
3730}
3731
3732void
3733kmem_init(void)
3734{
3735	kmem_cache_t *cp;
3736	int old_kmem_flags = kmem_flags;
3737	int use_large_pages = 0;
3738	size_t maxverify, minfirewall;
3739
3740	kstat_init();
3741
3742	/*
3743	 * Small-memory systems (< 24 MB) can't handle kmem_flags overhead.
3744	 */
3745	if (physmem < btop(24 << 20) && !(old_kmem_flags & KMF_STICKY))
3746		kmem_flags = 0;
3747
3748	/*
3749	 * Don't do firewalled allocations if the heap is less than 1TB
3750	 * (i.e., on a 32-bit kernel);
3751	 * the resulting VM_NEXTFIT allocations would create too much
3752	 * fragmentation in a small heap.
3753	 */
3754#if defined(_LP64)
3755	maxverify = minfirewall = PAGESIZE / 2;
3756#else
3757	maxverify = minfirewall = ULONG_MAX;
3758#endif
3759
3760	/* LINTED */
3761	ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
3762
3763	list_create(&kmem_caches, sizeof (kmem_cache_t),
3764	    offsetof(kmem_cache_t, cache_link));
3765
3766	kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
3767	    vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
3768	    VM_SLEEP | VMC_NO_QCACHE);
3769
3770	kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
3771	    PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
3772	    VM_SLEEP);
3773
3774	kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
3775	    segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
3776
3777	kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
3778	    segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
3779
3780	kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
3781	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
3782
3783	kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
3784	    NULL, 0, PAGESIZE,
3785	    kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
3786	    0, VM_SLEEP);
3787
3788	kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
3789	    segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0, VM_SLEEP);
3790
3791	/* temporary oversize arena for mod_read_system_file */
3792	kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
3793	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
3794
3795	kmem_reap_interval = 15 * hz;
3796
3797	/*
3798	 * Read /etc/system.  This is a chicken-and-egg problem because
3799	 * kmem_flags may be set in /etc/system, but mod_read_system_file()
3800	 * needs to use the allocator.  The simplest solution is to create
3801	 * all the standard kmem caches, read /etc/system, destroy all the
3802	 * caches we just created, and then create them all again in light
3803	 * of the (possibly) new kmem_flags and other kmem tunables.
3804	 */
3805	kmem_cache_init(1, 0);
3806
3807	mod_read_system_file(boothowto & RB_ASKNAME);
3808
3809	while ((cp = list_tail(&kmem_caches)) != NULL)
3810		kmem_cache_destroy(cp);
3811
3812	vmem_destroy(kmem_oversize_arena);
3813
3814	if (old_kmem_flags & KMF_STICKY)
3815		kmem_flags = old_kmem_flags;
3816
3817	if (!(kmem_flags & KMF_AUDIT))
3818		vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
3819
3820	if (kmem_maxverify == 0)
3821		kmem_maxverify = maxverify;
3822
3823	if (kmem_minfirewall == 0)
3824		kmem_minfirewall = minfirewall;
3825
3826	/*
3827	 * Give segkmem a chance to figure out if we are using large pages
3828	 * for the kernel heap.
3829	 */
3830	use_large_pages = segkmem_lpsetup();
3831
3832	/*
3833	 * To protect against corruption, we keep the actual number of caller
3834	 * PCs that KMF_LITE records separate from the tunable.  We arbitrarily
3835	 * clamp it to 16, since the overhead for small buffers quickly gets
3836	 * out of hand.
3837	 *
3838	 * The real limit would depend on the needs of the largest KMC_NOHASH
3839	 * cache.
3840	 */
3841	kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
3842	kmem_lite_pcs = kmem_lite_count;
3843
3844	/*
3845	 * Normally, we firewall oversized allocations when possible, but
3846	 * if we are using large pages for kernel memory, and we don't have
3847	 * any non-LITE debugging flags set, we want to allocate oversized
3848	 * buffers from large pages, and so skip the firewalling.
3849	 */
3850	if (use_large_pages &&
3851	    ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
3852		kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
3853		    PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
3854		    0, VM_SLEEP);
3855	} else {
3856		kmem_oversize_arena = vmem_create("kmem_oversize",
3857		    NULL, 0, PAGESIZE,
3858		    segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
3859		    kmem_firewall_va_arena : heap_arena, 0, VM_SLEEP);
3860	}
3861
3862	kmem_cache_init(2, use_large_pages);
3863
3864	if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
3865		if (kmem_transaction_log_size == 0)
3866			kmem_transaction_log_size = kmem_maxavail() / 50;
3867		kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
3868	}
3869
3870	if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
3871		if (kmem_content_log_size == 0)
3872			kmem_content_log_size = kmem_maxavail() / 50;
3873		kmem_content_log = kmem_log_init(kmem_content_log_size);
3874	}
3875
3876	kmem_failure_log = kmem_log_init(kmem_failure_log_size);
3877
3878	kmem_slab_log = kmem_log_init(kmem_slab_log_size);
3879
3880	/*
3881	 * Initialize STREAMS message caches so allocb() is available.
3882	 * This allows us to initialize the logging framework (cmn_err(9F),
3883	 * strlog(9F), etc.) so we can start recording messages.
3884	 */
3885	streams_msg_init();
3886
3887	/*
3888	 * Initialize the ZSD framework in Zones so modules loaded henceforth
3889	 * can register their callbacks.
3890	 */
3891	zone_zsd_init();
3892
3893	log_init();
3894	taskq_init();
3895
3896	/*
3897	 * Warn about invalid or dangerous values of kmem_flags.
3898	 * Always warn about unsupported values.
3899	 */
3900	if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
3901	    KMF_CONTENTS | KMF_LITE)) != 0) ||
3902	    ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
3903		cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
3904		    "See the Solaris Tunable Parameters Reference Manual.",
3905		    kmem_flags);
3906
3907#ifdef DEBUG
3908	if ((kmem_flags & KMF_DEBUG) == 0)
3909		cmn_err(CE_NOTE, "kmem debugging disabled.");
3910#else
3911	/*
3912	 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
3913	 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
3914	 * if KMF_AUDIT is set). We should warn the user about the performance
3915	 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
3916	 * isn't set (since that disables AUDIT).
3917	 */
3918	if (!(kmem_flags & KMF_LITE) &&
3919	    (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
3920		cmn_err(CE_WARN, "High-overhead kmem debugging features "
3921		    "enabled (kmem_flags = 0x%x).  Performance degradation "
3922		    "and large memory overhead possible. See the Solaris "
3923		    "Tunable Parameters Reference Manual.", kmem_flags);
3924#endif /* not DEBUG */
3925
3926	kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
3927
3928	kmem_ready = 1;
3929
3930	/*
3931	 * Initialize the platform-specific aligned/DMA memory allocator.
3932	 */
3933	ka_init();
3934
3935	/*
3936	 * Initialize 32-bit ID cache.
3937	 */
3938	id32_init();
3939
3940	/*
3941	 * Initialize the networking stack so modules loaded can
3942	 * register their callbacks.
3943	 */
3944	netstack_init();
3945}
3946
3947static void
3948kmem_move_init(void)
3949{
3950	kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
3951	    sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
3952	    kmem_msb_arena, KMC_NOHASH);
3953	kmem_move_cache = kmem_cache_create("kmem_move_cache",
3954	    sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
3955	    kmem_msb_arena, KMC_NOHASH);
3956
3957	/*
3958	 * kmem guarantees that move callbacks are sequential and that even
3959	 * across multiple caches no two moves ever execute simultaneously.
3960	 * Move callbacks are processed on a separate taskq so that client code
3961	 * does not interfere with internal maintenance tasks.
3962	 */
3963	kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
3964	    minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
3965}
3966
3967void
3968kmem_thread_init(void)
3969{
3970	kmem_move_init();
3971	kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
3972	    300, INT_MAX, TASKQ_PREPOPULATE);
3973	kmem_cache_applyall(kmem_check_destructor, kmem_move_taskq,
3974	    TQ_NOSLEEP);
3975}
3976
3977void
3978kmem_mp_init(void)
3979{
3980	mutex_enter(&cpu_lock);
3981	register_cpu_setup_func(kmem_cpu_setup, NULL);
3982	mutex_exit(&cpu_lock);
3983
3984	kmem_update_timeout(NULL);
3985}
3986
3987/*
3988 * Return the slab of the allocated buffer, or NULL if the buffer is not
3989 * allocated. This function may be called with a known slab address to determine
3990 * whether or not the buffer is allocated, or with a NULL slab address to obtain
3991 * an allocated buffer's slab.
3992 */
3993static kmem_slab_t *
3994kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
3995{
3996	kmem_bufctl_t *bcp, *bufbcp;
3997
3998	ASSERT(MUTEX_HELD(&cp->cache_lock));
3999	ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4000
4001	if (cp->cache_flags & KMF_HASH) {
4002		for (bcp = *KMEM_HASH(cp, buf);
4003		    (bcp != NULL) && (bcp->bc_addr != buf);
4004		    bcp = bcp->bc_next) {
4005			continue;
4006		}
4007		ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4008		return (bcp == NULL ? NULL : bcp->bc_slab);
4009	}
4010
4011	if (sp == NULL) {
4012		sp = KMEM_SLAB(cp, buf);
4013	}
4014	bufbcp = KMEM_BUFCTL(cp, buf);
4015	for (bcp = sp->slab_head;
4016	    (bcp != NULL) && (bcp != bufbcp);
4017	    bcp = bcp->bc_next) {
4018		continue;
4019	}
4020	return (bcp == NULL ? sp : NULL);
4021}
4022
4023static boolean_t
4024kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4025{
4026	long refcnt;
4027
4028	ASSERT(cp->cache_defrag != NULL);
4029
4030	/* If we're desperate, we don't care if the client said NO. */
4031	refcnt = sp->slab_refcnt;
4032	if (flags & KMM_DESPERATE) {
4033		return (refcnt < sp->slab_chunks); /* any partial */
4034	}
4035
4036	if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4037		return (B_FALSE);
4038	}
4039
4040	if (kmem_move_any_partial) {
4041		return (refcnt < sp->slab_chunks);
4042	}
4043
4044	if ((refcnt == 1) && (refcnt < sp->slab_chunks)) {
4045		return (B_TRUE);
4046	}
4047
4048	/*
4049	 * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4050	 * slabs with a progressively higher percentage of used buffers can be
4051	 * reclaimed until the cache as a whole is no longer fragmented.
4052	 *
4053	 *	sp->slab_refcnt   kmd_reclaim_numer
4054	 *	--------------- < ------------------
4055	 *	sp->slab_chunks   KMEM_VOID_FRACTION
4056	 */
4057	return ((refcnt * KMEM_VOID_FRACTION) <
4058	    (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4059}
4060
4061static void *
4062kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf,
4063    void *tbuf)
4064{
4065	int i;		/* magazine round index */
4066
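	/*
	 * If the given buffer occupies one of the magazine's rounds, swap
	 * in tbuf so that the magazine stays full, and hand the found
	 * buffer back to the caller.
	 */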
4067	for (i = 0; i < n; i++) {
4068		if (buf == m->mag_round[i]) {
4069			if (cp->cache_flags & KMF_BUFTAG) {
4070				(void) kmem_cache_free_debug(cp, tbuf,
4071				    caller());
4072			}
4073			m->mag_round[i] = tbuf;
4074			return (buf);
4075		}
4076	}
4077
4078	return (NULL);
4079}
4080
4081/*
4082 * Hunt the magazine layer for the given buffer. If found, the buffer is
4083 * removed from the magazine layer and returned; otherwise NULL is returned.
4084 * The returned buffer is in the free, constructed state.
4085 */
4086static void *
4087kmem_hunt_mags(kmem_cache_t *cp, void *buf)
4088{
4089	kmem_cpu_cache_t *ccp;
4090	kmem_magazine_t	*m;
4091	int cpu_seqid;
4092	int n;		/* magazine rounds */
4093	void *tbuf;	/* temporary swap buffer */
4094
4095	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4096
4097	/*
4098	 * Allocate a buffer to swap with the one we hope to pull out of a
4099	 * magazine when found.
4100	 */
4101	tbuf = kmem_cache_alloc(cp, KM_NOSLEEP);
4102	if (tbuf == NULL) {
4103		KMEM_STAT_ADD(kmem_move_stats.kms_hunt_alloc_fail);
4104		return (NULL);
4105	}
4106	if (tbuf == buf) {
4107		KMEM_STAT_ADD(kmem_move_stats.kms_hunt_lucky);
4108		if (cp->cache_flags & KMF_BUFTAG) {
4109			(void) kmem_cache_free_debug(cp, buf, caller());
4110		}
4111		return (buf);
4112	}
4113
4114	/* Hunt the depot. */
4115	mutex_enter(&cp->cache_depot_lock);
4116	n = cp->cache_magtype->mt_magsize;
4117	for (m = cp->cache_full.ml_list; m != NULL; m = m->mag_next) {
4118		if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4119			mutex_exit(&cp->cache_depot_lock);
4120			return (buf);
4121		}
4122	}
4123	mutex_exit(&cp->cache_depot_lock);
4124
4125	/* Hunt the per-CPU magazines. */
4126	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
4127		ccp = &cp->cache_cpu[cpu_seqid];
4128
4129		mutex_enter(&ccp->cc_lock);
4130		m = ccp->cc_loaded;
4131		n = ccp->cc_rounds;
4132		if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4133			mutex_exit(&ccp->cc_lock);
4134			return (buf);
4135		}
4136		m = ccp->cc_ploaded;
4137		n = ccp->cc_prounds;
4138		if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4139			mutex_exit(&ccp->cc_lock);
4140			return (buf);
4141		}
4142		mutex_exit(&ccp->cc_lock);
4143	}
4144
4145	kmem_cache_free(cp, tbuf);
4146	return (NULL);
4147}
4148
4149/*
4150 * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4151 * or when the buffer is freed.
4152 */
4153static void
4154kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4155{
4156	ASSERT(MUTEX_HELD(&cp->cache_lock));
4157	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4158
4159	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4160		return;
4161	}
4162
4163	if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4164		if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4165			avl_remove(&cp->cache_partial_slabs, sp);
4166			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4167			sp->slab_stuck_offset = (uint32_t)-1;
4168			avl_add(&cp->cache_partial_slabs, sp);
4169		}
4170	} else {
4171		sp->slab_later_count = 0;
4172		sp->slab_stuck_offset = (uint32_t)-1;
4173	}
4174}
4175
4176static void
4177kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4178{
4179	ASSERT(taskq_member(kmem_move_taskq, curthread));
4180	ASSERT(MUTEX_HELD(&cp->cache_lock));
4181	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4182
4183	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4184		return;
4185	}
4186
4187	avl_remove(&cp->cache_partial_slabs, sp);
4188	sp->slab_later_count = 0;
4189	sp->slab_flags |= KMEM_SLAB_NOMOVE;
4190	sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4191	avl_add(&cp->cache_partial_slabs, sp);
4192}
4193
4194static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4195
4196/*
4197 * The move callback takes two buffer addresses, the buffer to be moved, and a
4198 * newly allocated and constructed buffer selected by kmem as the destination.
4199 * It also takes the size of the buffer and an optional user argument specified
4200 * at cache creation time. kmem guarantees that the buffer to be moved has not
4201 * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4202 * guarantee the present whereabouts of the buffer to be moved, so it is up to
4203 * the client to safely determine whether or not it is still using the buffer.
4204 * The client must not free either of the buffers passed to the move callback,
4205 * since kmem wants to free them directly to the slab layer. The client response
4206 * tells kmem which of the two buffers to free:
4207 *
4208 * YES		kmem frees the old buffer (the move was successful)
4209 * NO		kmem frees the new buffer, marks the slab of the old buffer
4210 *              non-reclaimable to avoid bothering the client again
4211 * LATER	kmem frees the new buffer, increments slab_later_count
4212 * DONT_KNOW	kmem frees the new buffer, searches mags for the old buffer
4213 * DONT_NEED	kmem frees both the old buffer and the new buffer
4214 *
4215 * The pending callback argument now being processed contains both of the
4216 * buffers (old and new) passed to the move callback function, the slab of the
4217 * old buffer, and flags related to the move request, such as whether or not the
4218 * system was desperate for memory.
4219 */
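/*
 * As a hypothetical sketch of that contract (object_t, o_lock, and
 * object_is_current() are invented names, not part of any kmem interface),
 * a client move callback might look like:
 *
 *	static kmem_cbrc_t
 *	object_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		object_t *op = old, *np = new;
 *
 *		if (!object_is_current(op))
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (!mutex_tryenter(&op->o_lock))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(op, np, size);
 *		... update any references so callers now use np ...
 *		mutex_exit(&op->o_lock);
 *		return (KMEM_CBRC_YES);
 *	}
 */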
4220static void
4221kmem_move_buffer(kmem_move_t *callback)
4222{
4223	kmem_cbrc_t response;
4224	kmem_slab_t *sp = callback->kmm_from_slab;
4225	kmem_cache_t *cp = sp->slab_cache;
4226	boolean_t free_on_slab;
4227
4228	ASSERT(taskq_member(kmem_move_taskq, curthread));
4229	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4230	ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4231
4232	/*
4233	 * The number of allocated buffers on the slab may have changed since we
4234	 * last checked the slab's reclaimability (when the pending move was
4235	 * enqueued), or the client may have responded NO when asked to move
4236	 * another buffer on the same slab.
4237	 */
4238	if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4239		KMEM_STAT_ADD(kmem_move_stats.kms_no_longer_reclaimable);
4240		KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4241		    kmem_move_stats.kms_notify_no_longer_reclaimable);
4242		kmem_slab_free(cp, callback->kmm_to_buf);
4243		kmem_move_end(cp, callback);
4244		return;
4245	}
4246
4247	/*
4248	 * Hunting magazines is expensive, so we'll wait to do that until the
4249	 * client responds KMEM_CBRC_DONT_KNOW. However, checking the slab layer
4250	 * is cheap, so we might as well do that here in case we can avoid
4251	 * bothering the client.
4252	 */
4253	mutex_enter(&cp->cache_lock);
4254	free_on_slab = (kmem_slab_allocated(cp, sp,
4255	    callback->kmm_from_buf) == NULL);
4256	mutex_exit(&cp->cache_lock);
4257
4258	if (free_on_slab) {
4259		KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_slab);
4260		kmem_slab_free(cp, callback->kmm_to_buf);
4261		kmem_move_end(cp, callback);
4262		return;
4263	}
4264
4265	if (cp->cache_flags & KMF_BUFTAG) {
4266		/*
4267		 * Make kmem_cache_alloc_debug() apply the constructor for us.
4268		 */
4269		if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4270		    KM_NOSLEEP, 1, caller()) != 0) {
4271			KMEM_STAT_ADD(kmem_move_stats.kms_alloc_fail);
4272			kmem_move_end(cp, callback);
4273			return;
4274		}
4275	} else if (cp->cache_constructor != NULL &&
4276	    cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4277	    KM_NOSLEEP) != 0) {
4278		atomic_add_64(&cp->cache_alloc_fail, 1);
4279		KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
4280		kmem_slab_free(cp, callback->kmm_to_buf);
4281		kmem_move_end(cp, callback);
4282		return;
4283	}
4284
4285	KMEM_STAT_ADD(kmem_move_stats.kms_callbacks);
4286	KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4287	    kmem_move_stats.kms_notify_callbacks);
4288	cp->cache_defrag->kmd_callbacks++;
4289	cp->cache_defrag->kmd_thread = curthread;
4290	cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4291	cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4292	DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4293	    callback);
4294
4295	response = cp->cache_move(callback->kmm_from_buf,
4296	    callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4297
4298	DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4299	    callback, kmem_cbrc_t, response);
4300	cp->cache_defrag->kmd_thread = NULL;
4301	cp->cache_defrag->kmd_from_buf = NULL;
4302	cp->cache_defrag->kmd_to_buf = NULL;
4303
4304	if (response == KMEM_CBRC_YES) {
4305		KMEM_STAT_ADD(kmem_move_stats.kms_yes);
4306		cp->cache_defrag->kmd_yes++;
4307		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4308		mutex_enter(&cp->cache_lock);
4309		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4310		mutex_exit(&cp->cache_lock);
4311		kmem_move_end(cp, callback);
4312		return;
4313	}
4314
4315	switch (response) {
4316	case KMEM_CBRC_NO:
4317		KMEM_STAT_ADD(kmem_move_stats.kms_no);
4318		cp->cache_defrag->kmd_no++;
4319		mutex_enter(&cp->cache_lock);
4320		kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4321		mutex_exit(&cp->cache_lock);
4322		break;
4323	case KMEM_CBRC_LATER:
4324		KMEM_STAT_ADD(kmem_move_stats.kms_later);
4325		cp->cache_defrag->kmd_later++;
4326		mutex_enter(&cp->cache_lock);
4327		if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4328			mutex_exit(&cp->cache_lock);
4329			break;
4330		}
4331
4332		if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4333			KMEM_STAT_ADD(kmem_move_stats.kms_disbelief);
4334			kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4335		} else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4336			sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4337			    callback->kmm_from_buf);
4338		}
4339		mutex_exit(&cp->cache_lock);
4340		break;
4341	case KMEM_CBRC_DONT_NEED:
4342		KMEM_STAT_ADD(kmem_move_stats.kms_dont_need);
4343		cp->cache_defrag->kmd_dont_need++;
4344		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4345		mutex_enter(&cp->cache_lock);
4346		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4347		mutex_exit(&cp->cache_lock);
4348		break;
4349	case KMEM_CBRC_DONT_KNOW:
4350		KMEM_STAT_ADD(kmem_move_stats.kms_dont_know);
4351		cp->cache_defrag->kmd_dont_know++;
4352		if (kmem_hunt_mags(cp, callback->kmm_from_buf) != NULL) {
4353			KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_mag);
4354			cp->cache_defrag->kmd_hunt_found++;
4355			kmem_slab_free_constructed(cp, callback->kmm_from_buf,
4356			    B_TRUE);
4357			mutex_enter(&cp->cache_lock);
4358			kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4359			mutex_exit(&cp->cache_lock);
4360		} else {
4361			KMEM_STAT_ADD(kmem_move_stats.kms_hunt_notfound);
4362		}
4363		break;
4364	default:
4365		panic("'%s' (%p) unexpected move callback response %d\n",
4366		    cp->cache_name, (void *)cp, response);
4367	}
4368
4369	kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4370	kmem_move_end(cp, callback);
4371}
4372
4373/* Return B_FALSE if there is insufficient memory for the move request. */
4374static boolean_t
4375kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4376{
4377	void *to_buf;
4378	avl_index_t index;
4379	kmem_move_t *callback, *pending;
4380
4381	ASSERT(taskq_member(kmem_taskq, curthread));
4382	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4383	ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4384
4385	callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
4386	if (callback == NULL) {
4387		KMEM_STAT_ADD(kmem_move_stats.kms_callback_alloc_fail);
4388		return (B_FALSE);
4389	}
4390
4391	callback->kmm_from_slab = sp;
4392	callback->kmm_from_buf = buf;
4393	callback->kmm_flags = flags;
4394
4395	mutex_enter(&cp->cache_lock);
4396
4397	if (avl_numnodes(&cp->cache_partial_slabs) <= 1) {
4398		mutex_exit(&cp->cache_lock);
4399		kmem_cache_free(kmem_move_cache, callback);
4400		return (B_TRUE); /* there is no need for the move request */
4401	}
4402
4403	pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
4404	if (pending != NULL) {
4405		/*
4406		 * If the move is already pending and we're desperate now,
4407		 * update the move flags.
4408		 */
4409		if (flags & KMM_DESPERATE) {
4410			pending->kmm_flags |= KMM_DESPERATE;
4411		}
4412		mutex_exit(&cp->cache_lock);
4413		KMEM_STAT_ADD(kmem_move_stats.kms_already_pending);
4414		kmem_cache_free(kmem_move_cache, callback);
4415		return (B_TRUE);
4416	}
4417
4418	to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs));
4419	callback->kmm_to_buf = to_buf;
4420	avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
4421
4422	mutex_exit(&cp->cache_lock);
4423
4424	if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
4425	    callback, TQ_NOSLEEP)) {
4426		mutex_enter(&cp->cache_lock);
4427		avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4428		mutex_exit(&cp->cache_lock);
4429		kmem_slab_free_constructed(cp, to_buf, B_FALSE);
4430		kmem_cache_free(kmem_move_cache, callback);
4431		return (B_FALSE);
4432	}
4433
4434	return (B_TRUE);
4435}
4436
4437static void
4438kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
4439{
4440	avl_index_t index;
4441
4442	ASSERT(cp->cache_defrag != NULL);
4443	ASSERT(taskq_member(kmem_move_taskq, curthread));
4444	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4445
4446	mutex_enter(&cp->cache_lock);
4447	VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
4448	    callback->kmm_from_buf, &index) != NULL);
4449	avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4450	if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
4451		list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
4452		kmem_slab_t *sp;
4453
4454		/*
4455		 * The last pending move completed. Release all slabs from the
4456		 * front of the dead list except for any slab at the tail that
4457		 * needs to be released from the context of kmem_move_buffers().
4458		 * kmem deferred unmapping the buffers on these slabs in order
4459		 * to guarantee that buffers passed to the move callback have
4460		 * been touched only by kmem or by the client itself.
4461		 */
4462		while ((sp = list_remove_head(deadlist)) != NULL) {
4463			if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
4464				list_insert_tail(deadlist, sp);
4465				break;
4466			}
4467			cp->cache_defrag->kmd_deadcount--;
4468			cp->cache_slab_destroy++;
4469			mutex_exit(&cp->cache_lock);
4470			kmem_slab_destroy(cp, sp);
4471			KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
4472			mutex_enter(&cp->cache_lock);
4473		}
4474	}
4475	mutex_exit(&cp->cache_lock);
4476	kmem_cache_free(kmem_move_cache, callback);
4477}
4478
4479/*
4480 * Move buffers from least used slabs first by scanning backwards from the end
4481 * of the partial slab list. Scan at most max_scan candidate slabs and move
4482 * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
4483 * If desperate to reclaim memory, move buffers from any partial slab; otherwise
4484 * skip slabs with a ratio of allocated buffers at or above the current
4485 * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
4486 * scan is aborted) so that the caller can adjust the reclaimability threshold
4487 * depending on how many reclaimable slabs it finds.
4488 *
4489 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
4490 * move request, since it is not valid for kmem_move_begin() to call
4491 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
4492 */
4493static int
4494kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
4495    int flags)
4496{
4497	kmem_slab_t *sp;
4498	void *buf;
4499	int i, j; /* slab index, buffer index */
4500	int s; /* reclaimable slabs */
4501	int b; /* allocated (movable) buffers on reclaimable slab */
4502	boolean_t success;
4503	int refcnt;
4504	int nomove;
4505
4506	ASSERT(taskq_member(kmem_taskq, curthread));
4507	ASSERT(MUTEX_HELD(&cp->cache_lock));
4508	ASSERT(kmem_move_cache != NULL);
4509	ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
4510	ASSERT(avl_numnodes(&cp->cache_partial_slabs) > 1);
4511
4512	if (kmem_move_blocked) {
4513		return (0);
4514	}
4515
4516	if (kmem_move_fulltilt) {
4517		max_slabs = 0;
4518		flags |= KMM_DESPERATE;
4519	}
4520
4521	if (max_scan == 0 || (flags & KMM_DESPERATE)) {
4522		/*
4523		 * Scan as many slabs as needed to find the desired number of
4524		 * candidate slabs.
4525		 */
4526		max_scan = (size_t)-1;
4527	}
4528
4529	if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
4530		/* Find as many candidate slabs as possible. */
4531		max_slabs = (size_t)-1;
4532	}
4533
4534	sp = avl_last(&cp->cache_partial_slabs);
4535	ASSERT(sp != NULL && KMEM_SLAB_IS_PARTIAL(sp));
4536	for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) &&
4537	    (sp != avl_first(&cp->cache_partial_slabs));
4538	    sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
4539
4540		if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
4541			continue;
4542		}
4543		s++;
4544
4545		/* Look for allocated buffers to move. */
4546		for (j = 0, b = 0, buf = sp->slab_base;
4547		    (j < sp->slab_chunks) && (b < sp->slab_refcnt);
4548		    buf = (((char *)buf) + cp->cache_chunksize), j++) {
4549
4550			if (kmem_slab_allocated(cp, sp, buf) == NULL) {
4551				continue;
4552			}
4553
4554			b++;
4555
4556			/*
4557			 * Prevent the slab from being destroyed while we drop
4558			 * cache_lock and while the pending move is not yet
4559			 * registered. Flag the pending move while
4560			 * kmd_moves_pending may still be empty, since we can't
4561			 * yet rely on a non-zero pending move count to prevent
4562			 * the slab from being destroyed.
4563			 */
4564			ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
4565			sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
4566			/*
4567			 * Recheck refcnt and nomove after reacquiring the lock,
4568			 * since these control the order of partial slabs, and
4569			 * we want to know if we can pick up the scan where we
4570			 * left off.
4571			 */
4572			refcnt = sp->slab_refcnt;
4573			nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
4574			mutex_exit(&cp->cache_lock);
4575
4576			success = kmem_move_begin(cp, sp, buf, flags);
4577
4578			/*
4579			 * Now, before the lock is reacquired, kmem could
4580			 * process all pending move requests and purge the
4581			 * deadlist, so that upon reacquiring the lock, sp could
4582			 * already be gone. Therefore, the KMEM_SLAB_MOVE_PENDING
4583			 * flag causes the slab to be put at the end of the
4584			 * deadlist and prevents it from being purged, since we
4585			 * plan to destroy it here after reacquiring the lock.
4586			 */
4587			mutex_enter(&cp->cache_lock);
4588			ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4589			sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
4590
			/*
			 * Destroy the slab now if it was completely freed while
			 * we dropped cache_lock.
			 */
			if (sp->slab_refcnt == 0) {
				list_t *deadlist =
				    &cp->cache_defrag->kmd_deadlist;

				ASSERT(!list_is_empty(deadlist));
				ASSERT(list_link_active((list_node_t *)
				    &sp->slab_link));

				list_remove(deadlist, sp);
				cp->cache_defrag->kmd_deadcount--;
				cp->cache_slab_destroy++;
				mutex_exit(&cp->cache_lock);
				kmem_slab_destroy(cp, sp);
				KMEM_STAT_ADD(kmem_move_stats.
				    kms_dead_slabs_freed);
				KMEM_STAT_ADD(kmem_move_stats.
				    kms_endscan_slab_destroyed);
				mutex_enter(&cp->cache_lock);
				/*
				 * Since we can't pick up the scan where we left
				 * off, abort the scan and say nothing about the
				 * number of reclaimable slabs.
				 */
				return (-1);
			}

			if (!success) {
				/*
				 * Abort the scan if there is not enough memory
				 * for the request and say nothing about the
				 * number of reclaimable slabs.
				 */
				KMEM_STAT_ADD(
				    kmem_move_stats.kms_endscan_nomem);
				return (-1);
			}

			/*
			 * The slab may have been completely allocated while the
			 * lock was dropped.
			 */
			if (KMEM_SLAB_IS_ALL_USED(sp)) {
				KMEM_STAT_ADD(
				    kmem_move_stats.kms_endscan_slab_all_used);
				return (-1);
			}

			/*
			 * The slab's position changed while the lock was
			 * dropped, so we don't know where we are in the
			 * sequence any more.
			 */
			if (sp->slab_refcnt != refcnt) {
				KMEM_STAT_ADD(
				    kmem_move_stats.kms_endscan_refcnt_changed);
				return (-1);
			}
			if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove) {
				KMEM_STAT_ADD(
				    kmem_move_stats.kms_endscan_nomove_changed);
				return (-1);
			}

			/*
			 * Generating a move request allocates a destination
			 * buffer from the slab layer, bumping the first partial
			 * slab if it is completely allocated. If the current
			 * slab becomes the first partial slab as a result, end
			 * the scan.
			 */
			ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
			if (sp == avl_first(&cp->cache_partial_slabs)) {
				goto end_scan;
			}
		}
	}
end_scan:

	KMEM_STAT_COND_ADD(sp == avl_first(&cp->cache_partial_slabs),
	    kmem_move_stats.kms_endscan_freelist);

	return (s);
}

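/*
 * Note on the scan bounds (the numbers here are illustrative, not tunable
 * defaults): a call such as kmem_move_buffers(cp, 10, 5, 0) examines at most
 * 10 partial slabs from the end of the list and stops early once 5
 * reclaimable candidates have been found. Passing 0 for either bound, or
 * setting KMM_DESPERATE, removes the corresponding limit, as handled at the
 * top of the function.
 */
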
typedef struct kmem_move_notify_args {
	kmem_cache_t *kmna_cache;
	void *kmna_buf;
} kmem_move_notify_args_t;

static void
kmem_cache_move_notify_task(void *arg)
{
	kmem_move_notify_args_t *args = arg;
	kmem_cache_t *cp = args->kmna_cache;
	void *buf = args->kmna_buf;
	kmem_slab_t *sp;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(list_link_active(&cp->cache_link));

	kmem_free(args, sizeof (kmem_move_notify_args_t));
	mutex_enter(&cp->cache_lock);
	sp = kmem_slab_allocated(cp, NULL, buf);

	/* Ignore the notification if the buffer is no longer allocated. */
	if (sp == NULL) {
		mutex_exit(&cp->cache_lock);
		return;
	}

	/* Ignore the notification if there's no reason to move the buffer. */
	if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
		/*
		 * The notification only matters if the slab was marked by an
		 * earlier refusal (or deferral) to move one of its buffers;
		 * otherwise there is nothing to undo, so ignore it.
		 */
		if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
		    (sp->slab_later_count == 0)) {
			mutex_exit(&cp->cache_lock);
			return;
		}

		kmem_slab_move_yes(cp, sp, buf);
		ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
		sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
		mutex_exit(&cp->cache_lock);
		/* see kmem_move_buffers() about dropping the lock */
		(void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
		mutex_enter(&cp->cache_lock);
		ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
		sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
		if (sp->slab_refcnt == 0) {
			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;

			ASSERT(!list_is_empty(deadlist));
			ASSERT(list_link_active((list_node_t *)
			    &sp->slab_link));

			list_remove(deadlist, sp);
			cp->cache_defrag->kmd_deadcount--;
			cp->cache_slab_destroy++;
			mutex_exit(&cp->cache_lock);
			kmem_slab_destroy(cp, sp);
			KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
			return;
		}
	} else {
		kmem_slab_move_yes(cp, sp, buf);
	}
	mutex_exit(&cp->cache_lock);
}

void
kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
{
	kmem_move_notify_args_t *args;

	KMEM_STAT_ADD(kmem_move_stats.kms_notify);
	args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
	if (args != NULL) {
		args->kmna_cache = cp;
		args->kmna_buf = buf;
		(void) taskq_dispatch(kmem_taskq,
		    (task_func_t *)kmem_cache_move_notify_task, args,
		    TQ_NOSLEEP);
	}
}

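/*
 * Illustrative sketch (not part of this file): a client whose move callback
 * cannot relocate a buffer at callback time may return KMEM_CBRC_LATER and
 * then call kmem_cache_move_notify() once the buffer becomes movable. The
 * names my_object_t, my_object_cache, and object_is_held() are hypothetical.
 *
 *	static kmem_cbrc_t
 *	my_object_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		my_object_t *op = old;
 *
 *		if (object_is_held(op))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 * Later, when the hold on op is dropped:
 *
 *	kmem_cache_move_notify(my_object_cache, op);
 */
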
static void
kmem_cache_defrag(kmem_cache_t *cp)
{
	size_t n;

	ASSERT(cp->cache_defrag != NULL);

	mutex_enter(&cp->cache_lock);
	n = avl_numnodes(&cp->cache_partial_slabs);
	if (n > 1) {
		/* kmem_move_buffers() drops and reacquires cache_lock */
		(void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
		KMEM_STAT_ADD(kmem_move_stats.kms_defrags);
	}
	mutex_exit(&cp->cache_lock);
}

/* Is this cache above the fragmentation threshold? */
static boolean_t
kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
{
	if (avl_numnodes(&cp->cache_partial_slabs) <= 1)
		return (B_FALSE);

	/*
	 *	nfree		kmem_frag_numer
	 * ------------------ > ---------------
	 * cp->cache_buftotal	kmem_frag_denom
	 */
	return ((nfree * kmem_frag_denom) >
	    (cp->cache_buftotal * kmem_frag_numer));
}

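/*
 * Worked example (the 1/8 ratio is illustrative, not necessarily the
 * tunables' defaults): with kmem_frag_numer / kmem_frag_denom == 1/8, a cache
 * with cache_buftotal == 1000 crosses the threshold once more than 125 of its
 * buffers are free, since the test above cross-multiplies
 * (nfree * 8 > 1000 * 1) rather than dividing.
 */
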
static boolean_t
kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
{
	boolean_t fragmented;
	uint64_t nfree;

	ASSERT(MUTEX_HELD(&cp->cache_lock));
	*doreap = B_FALSE;

	if (!kmem_move_fulltilt && ((cp->cache_complete_slab_count +
	    avl_numnodes(&cp->cache_partial_slabs)) < kmem_frag_minslabs))
		return (B_FALSE);

	nfree = cp->cache_bufslab;
	fragmented = kmem_cache_frag_threshold(cp, nfree);
	/*
	 * Free buffers in the magazine layer appear allocated from the point of
	 * view of the slab layer. We want to know if the slab layer would
	 * appear fragmented if we included free buffers from magazines that
	 * have fallen out of the working set.
	 */
	if (!fragmented) {
		long reap;

		mutex_enter(&cp->cache_depot_lock);
		reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
		reap = MIN(reap, cp->cache_full.ml_total);
		mutex_exit(&cp->cache_depot_lock);

		nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
		if (kmem_cache_frag_threshold(cp, nfree)) {
			*doreap = B_TRUE;
		}
	}

	return (fragmented);
}

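/*
 * Worked example for the reap estimate above (illustrative numbers): if the
 * depot's working set calculation permits reaping reap == 10 full magazines
 * and each magazine holds mt_magsize == 15 buffers, then nfree is credited
 * with 150 additional buffers before the threshold is retested. If the cache
 * appears fragmented only with that credit, *doreap tells the caller to reap
 * the depot working set rather than start moving buffers.
 */
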
/* Called periodically from kmem_taskq */
static void
kmem_cache_scan(kmem_cache_t *cp)
{
	boolean_t reap = B_FALSE;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(cp->cache_defrag != NULL);

	mutex_enter(&cp->cache_lock);

	if (kmem_cache_is_fragmented(cp, &reap)) {
		kmem_defrag_t *kmd = cp->cache_defrag;
		int slabs_found;	/* signed: the scan may return -1 */

		/*
		 * Consolidate reclaimable slabs from the end of the partial
		 * slab list (scan at most kmem_reclaim_scan_range slabs to find
		 * reclaimable slabs). Keep track of how many candidate slabs we
		 * looked for and how many we actually found so we can adjust
		 * the definition of a candidate slab if we're having trouble
		 * finding them.
		 *
		 * kmem_move_buffers() drops and reacquires cache_lock.
		 */
		slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
		    kmem_reclaim_max_slabs, 0);
		if (slabs_found >= 0) {
			kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
			kmd->kmd_slabs_found += slabs_found;
		}

		if (++kmd->kmd_scans >= kmem_reclaim_scan_range) {
			kmd->kmd_scans = 0;

			/*
			 * If we had difficulty finding candidate slabs in
			 * previous scans, adjust the threshold so that
			 * candidates are easier to find; conversely, if we
			 * found every candidate we sought, tighten the
			 * threshold again.
			 */
			if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
				kmem_adjust_reclaim_threshold(kmd, -1);
			} else if ((kmd->kmd_slabs_found * 2) <
			    kmd->kmd_slabs_sought) {
				kmem_adjust_reclaim_threshold(kmd, 1);
			}
			kmd->kmd_slabs_sought = 0;
			kmd->kmd_slabs_found = 0;
		}
	} else {
		kmem_reset_reclaim_threshold(cp->cache_defrag);
#ifdef	DEBUG
		if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
			/*
			 * In a debug kernel we want the consolidator to
			 * run occasionally even when there is plenty of
			 * memory.
			 */
			uint32_t debug_rand;

			(void) random_get_bytes((uint8_t *)&debug_rand, 4);
			if (!kmem_move_noreap &&
			    ((debug_rand % kmem_mtb_reap) == 0)) {
				mutex_exit(&cp->cache_lock);
				kmem_cache_reap(cp);
				KMEM_STAT_ADD(kmem_move_stats.kms_debug_reaps);
				return;
			} else if ((debug_rand % kmem_mtb_move) == 0) {
				(void) kmem_move_buffers(cp,
				    kmem_reclaim_scan_range, 1, 0);
				KMEM_STAT_ADD(kmem_move_stats.
				    kms_debug_move_scans);
			}
		}
#endif	/* DEBUG */
	}

	mutex_exit(&cp->cache_lock);

	if (reap) {
		KMEM_STAT_ADD(kmem_move_stats.kms_scan_depot_ws_reaps);
		kmem_depot_ws_reap(cp);
	}
}

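/*
 * Worked example for the adaptive threshold in kmem_cache_scan() above
 * (illustrative numbers, not the tunables' defaults): suppose
 * kmem_reclaim_scan_range == 12 and kmem_reclaim_max_slabs == 4. Each
 * completed scan of a fragmented cache then adds 4 to kmd_slabs_sought.
 * After 12 such scans, if only 10 of the 48 sought candidates were found
 * (10 * 2 < 48), the reclaimability threshold is relaxed one step so that
 * candidates become easier to find; if all 48 were found, it is tightened
 * one step.
 */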