1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25/*
26 * VM - Hardware Address Translation management for Spitfire MMU.
27 *
28 * This file implements the machine specific hardware translation
29 * needed by the VM system.  The machine independent interface is
30 * described in <vm/hat.h> while the machine dependent interface
31 * and data structures are described in <vm/hat_sfmmu.h>.
32 *
33 * The hat layer manages the address translation hardware as a cache
34 * driven by calls from the higher levels in the VM system.
35 */
36
37#include <sys/types.h>
38#include <sys/kstat.h>
39#include <vm/hat.h>
40#include <vm/hat_sfmmu.h>
41#include <vm/page.h>
42#include <sys/pte.h>
43#include <sys/systm.h>
44#include <sys/mman.h>
45#include <sys/sysmacros.h>
46#include <sys/machparam.h>
47#include <sys/vtrace.h>
48#include <sys/kmem.h>
49#include <sys/mmu.h>
50#include <sys/cmn_err.h>
51#include <sys/cpu.h>
52#include <sys/cpuvar.h>
53#include <sys/debug.h>
54#include <sys/lgrp.h>
55#include <sys/archsystm.h>
56#include <sys/machsystm.h>
57#include <sys/vmsystm.h>
58#include <vm/as.h>
59#include <vm/seg.h>
60#include <vm/seg_kp.h>
61#include <vm/seg_kmem.h>
62#include <vm/seg_kpm.h>
63#include <vm/rm.h>
64#include <sys/t_lock.h>
65#include <sys/obpdefs.h>
66#include <sys/vm_machparam.h>
67#include <sys/var.h>
68#include <sys/trap.h>
69#include <sys/machtrap.h>
70#include <sys/scb.h>
71#include <sys/bitmap.h>
72#include <sys/machlock.h>
73#include <sys/membar.h>
74#include <sys/atomic.h>
75#include <sys/cpu_module.h>
76#include <sys/prom_debug.h>
77#include <sys/ksynch.h>
78#include <sys/mem_config.h>
79#include <sys/mem_cage.h>
80#include <vm/vm_dep.h>
81#include <vm/xhat_sfmmu.h>
82#include <sys/fpu/fpusystm.h>
83#include <vm/mach_kpm.h>
84#include <sys/callb.h>
85
86#ifdef	DEBUG
87#define	SFMMU_VALIDATE_HMERID(hat, rid, saddr, len)			\
88	if (SFMMU_IS_SHMERID_VALID(rid)) {				\
89		caddr_t _eaddr = (saddr) + (len);			\
90		sf_srd_t *_srdp;					\
91		sf_region_t *_rgnp;					\
92		ASSERT((rid) < SFMMU_MAX_HME_REGIONS);			\
93		ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid));	\
94		ASSERT((hat) != ksfmmup);				\
95		_srdp = (hat)->sfmmu_srdp;				\
96		ASSERT(_srdp != NULL);					\
97		ASSERT(_srdp->srd_refcnt != 0);				\
98		_rgnp = _srdp->srd_hmergnp[(rid)];			\
99		ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid);		\
100		ASSERT(_rgnp->rgn_refcnt != 0);				\
101		ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE));	\
102		ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) ==	\
103		    SFMMU_REGION_HME);					\
104		ASSERT((saddr) >= _rgnp->rgn_saddr);			\
105		ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size);	\
106		ASSERT(_eaddr > _rgnp->rgn_saddr);			\
107		ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size);	\
108	}
109
110#define	SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 	 	 \
111{						 			 \
112		caddr_t _hsva;						 \
113		caddr_t _heva;						 \
114		caddr_t _rsva;					 	 \
115		caddr_t _reva;					 	 \
116		int	_ttesz = get_hblk_ttesz(hmeblkp);		 \
117		int	_flagtte;					 \
118		ASSERT((srdp)->srd_refcnt != 0);			 \
119		ASSERT((rid) < SFMMU_MAX_HME_REGIONS);			 \
120		ASSERT((rgnp)->rgn_id == rid);				 \
121		ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE));	 \
122		ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) ==	 \
123		    SFMMU_REGION_HME);					 \
124		ASSERT(_ttesz <= (rgnp)->rgn_pgszc);			 \
125		_hsva = (caddr_t)get_hblk_base(hmeblkp);		 \
126		_heva = get_hblk_endaddr(hmeblkp);			 \
127		_rsva = (caddr_t)P2ALIGN(				 \
128		    (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES);	 \
129		_reva = (caddr_t)P2ROUNDUP(				 \
130		    (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size),	 \
131		    HBLK_MIN_BYTES);					 \
132		ASSERT(_hsva >= _rsva);				 	 \
133		ASSERT(_hsva < _reva);				 	 \
134		ASSERT(_heva > _rsva);				 	 \
135		ASSERT(_heva <= _reva);				 	 \
136		_flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ :  \
137			_ttesz;						 \
138		ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte));		 \
139}
140
141#else /* DEBUG */
142#define	SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
143#define	SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
144#endif /* DEBUG */
145
146#if defined(SF_ERRATA_57)
147extern caddr_t errata57_limit;
148#endif
149
150#define	HME8BLK_SZ_RND		((roundup(HME8BLK_SZ, sizeof (int64_t))) /  \
151				(sizeof (int64_t)))
152#define	HBLK_RESERVE		((struct hme_blk *)hblk_reserve)
153
154#define	HBLK_RESERVE_CNT	128
155#define	HBLK_RESERVE_MIN	20
156
157static struct hme_blk		*freehblkp;
158static kmutex_t			freehblkp_lock;
159static int			freehblkcnt;
160
161static int64_t			hblk_reserve[HME8BLK_SZ_RND];
162static kmutex_t			hblk_reserve_lock;
163static kthread_t		*hblk_reserve_thread;
164
165static nucleus_hblk8_info_t	nucleus_hblk8;
166static nucleus_hblk1_info_t	nucleus_hblk1;
167
168/*
169 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here
170 * after the initial phase of removing an hmeblk from the hash chain, see
171 * the detailed comment in sfmmu_hblk_hash_rm() for further details.
172 */
173static cpu_hme_pend_t		*cpu_hme_pend;
174static uint_t			cpu_hme_pend_thresh;
175/*
176 * SFMMU specific hat functions
177 */
178void	hat_pagecachectl(struct page *, int);
179
180/* flags for hat_pagecachectl */
181#define	HAT_CACHE	0x1
182#define	HAT_UNCACHE	0x2
183#define	HAT_TMPNC	0x4
184
185/*
186 * Flag to allow the creation of non-cacheable translations
187 * to system memory. It is off by default. At the moment this
188 * flag is used by the ecache error injector. The error injector
189 * will turn it on when creating such a translation then shut it
190 * off when it's finished.
191 */
192
193int	sfmmu_allow_nc_trans = 0;
194
195/*
196 * Flag to disable large page support.
197 * 	value of 1 => disable all large pages.
198 *	bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
199 *
200 * For example, use the value 0x4 to disable 512K pages.
201 *
202 */
203#define	LARGE_PAGES_OFF		0x1
204
205/*
206 * The disable_large_pages and disable_ism_large_pages variables control
207 * hat_memload_array and the page sizes to be used by ISM and the kernel.
208 *
209 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
210 * are only used to control which OOB pages to use at upper VM segment creation
211 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
212 * Their values may come from platform or CPU specific code to disable page
213 * sizes that should not be used.
214 *
215 * WARNING: 512K pages are currently not supported for ISM/DISM.
216 */
217uint_t	disable_large_pages = 0;
218uint_t	disable_ism_large_pages = (1 << TTE512K);
219uint_t	disable_auto_data_large_pages = 0;
220uint_t	disable_auto_text_large_pages = 0;
221
222/*
223 * Private sfmmu data structures for hat management
224 */
225static struct kmem_cache *sfmmuid_cache;
226static struct kmem_cache *mmuctxdom_cache;
227
228/*
229 * Private sfmmu data structures for tsb management
230 */
231static struct kmem_cache *sfmmu_tsbinfo_cache;
232static struct kmem_cache *sfmmu_tsb8k_cache;
233static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
234static vmem_t *kmem_bigtsb_arena;
235static vmem_t *kmem_tsb_arena;
236
237/*
238 * sfmmu static variables for hmeblk resource management.
239 */
240static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
241static struct kmem_cache *sfmmu8_cache;
242static struct kmem_cache *sfmmu1_cache;
243static struct kmem_cache *pa_hment_cache;
244
245static kmutex_t 	ism_mlist_lock;	/* mutex for ism mapping list */
246/*
247 * private data for ism
248 */
249static struct kmem_cache *ism_blk_cache;
250static struct kmem_cache *ism_ment_cache;
251#define	ISMID_STARTADDR	NULL
252
253/*
254 * Region management data structures and function declarations.
255 */
256
257static void	sfmmu_leave_srd(sfmmu_t *);
258static int	sfmmu_srdcache_constructor(void *, void *, int);
259static void	sfmmu_srdcache_destructor(void *, void *);
260static int	sfmmu_rgncache_constructor(void *, void *, int);
261static void	sfmmu_rgncache_destructor(void *, void *);
262static int	sfrgnmap_isnull(sf_region_map_t *);
263static int	sfhmergnmap_isnull(sf_hmeregion_map_t *);
264static int	sfmmu_scdcache_constructor(void *, void *, int);
265static void	sfmmu_scdcache_destructor(void *, void *);
266static void	sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
267    size_t, void *, u_offset_t);
268
269static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
270static sf_srd_bucket_t *srd_buckets;
271static struct kmem_cache *srd_cache;
272static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
273static struct kmem_cache *region_cache;
274static struct kmem_cache *scd_cache;
275
276#ifdef sun4v
277int use_bigtsb_arena = 1;
278#else
279int use_bigtsb_arena = 0;
280#endif
281
282/* External /etc/system tunable, for turning on&off the shctx support */
283int disable_shctx = 0;
284/* Internal variable, set by MD if the HW supports shctx feature */
285int shctx_on = 0;
286
287#ifdef DEBUG
288static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
289#endif
290static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
291static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
292
293static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
294static void sfmmu_find_scd(sfmmu_t *);
295static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
296static void sfmmu_finish_join_scd(sfmmu_t *);
297static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
298static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
299static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
300static void sfmmu_free_scd_tsbs(sfmmu_t *);
301static void sfmmu_tsb_inv_ctx(sfmmu_t *);
302static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
303static void sfmmu_ism_hatflags(sfmmu_t *, int);
304static int sfmmu_srd_lock_held(sf_srd_t *);
305static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
306static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
307static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
308static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
309static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
310static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
311
312/*
313 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
314 * HAT flags, synchronizing TLB/TSB coherency, and context management.
315 * The lock is hashed on the sfmmup since the case where we need to lock
316 * all processes is rare but does occur (e.g. we need to unload a shared
317 * mapping from all processes using the mapping).  We have a lot of buckets,
318 * and each slab of sfmmu_t's can use about a quarter of them, giving us
319 * a fairly good distribution without wasting too much space and overhead
320 * when we have to grab them all.
321 */
322#define	SFMMU_NUM_LOCK	128		/* must be power of two */
323hatlock_t	hat_lock[SFMMU_NUM_LOCK];
324
325/*
326 * Hash algorithm optimized for a small number of slabs.
327 *  7 is (highbit((sizeof sfmmu_t)) - 1)
328 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
329 * kmem_cache, and thus they will be sequential within that cache.  In
330 * addition, each new slab will have a different "color" up to cache_maxcolor
331 * which will skew the hashing for each successive slab which is allocated.
332 * If the size of sfmmu_t changed to a larger size, this algorithm may need
333 * to be revisited.
334 */
335#define	TSB_HASH_SHIFT_BITS (7)
336#define	PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
337
338#ifdef DEBUG
339int tsb_hash_debug = 0;
340#define	TSB_HASH(sfmmup)	\
341	(tsb_hash_debug ? &hat_lock[0] : \
342	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
343#else	/* DEBUG */
344#define	TSB_HASH(sfmmup)	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
345#endif	/* DEBUG */
346
347
348/* sfmmu_replace_tsb() return codes. */
349typedef enum tsb_replace_rc {
350	TSB_SUCCESS,
351	TSB_ALLOCFAIL,
352	TSB_LOSTRACE,
353	TSB_ALREADY_SWAPPED,
354	TSB_CANTGROW
355} tsb_replace_rc_t;
356
357/*
358 * Flags for TSB allocation routines.
359 */
360#define	TSB_ALLOC	0x01
361#define	TSB_FORCEALLOC	0x02
362#define	TSB_GROW	0x04
363#define	TSB_SHRINK	0x08
364#define	TSB_SWAPIN	0x10
365
366/*
367 * Support for HAT callbacks.
368 */
369#define	SFMMU_MAX_RELOC_CALLBACKS	10
370int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
371static id_t sfmmu_cb_nextid = 0;
372static id_t sfmmu_tsb_cb_id;
373struct sfmmu_callback *sfmmu_cb_table;
374
375/*
376 * Kernel page relocation is enabled by default for non-caged
377 * kernel pages.  This has little effect unless segkmem_reloc is
378 * set, since by default kernel memory comes from inside the
379 * kernel cage.
380 */
381int hat_kpr_enabled = 1;
382
383kmutex_t	kpr_mutex;
384kmutex_t	kpr_suspendlock;
385kthread_t	*kreloc_thread;
386
387/*
388 * Enable VA->PA translation sanity checking on DEBUG kernels.
389 * Disabled by default.  This is incompatible with some
390 * drivers (error injector, RSM) so if it breaks you get
391 * to keep both pieces.
392 */
393int hat_check_vtop = 0;
394
395/*
396 * Private sfmmu routines (prototypes)
397 */
398static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
399static struct 	hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
400			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
401			uint_t);
402static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
403			caddr_t, demap_range_t *, uint_t);
404static caddr_t	sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
405			caddr_t, int);
406static void	sfmmu_hblk_free(struct hme_blk **);
407static void	sfmmu_hblks_list_purge(struct hme_blk **, int);
408static uint_t	sfmmu_get_free_hblk(struct hme_blk **, uint_t);
409static uint_t	sfmmu_put_free_hblk(struct hme_blk *, uint_t);
410static struct hme_blk *sfmmu_hblk_steal(int);
411static int	sfmmu_steal_this_hblk(struct hmehash_bucket *,
412			struct hme_blk *, uint64_t, struct hme_blk *);
413static caddr_t	sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
414
415static void	hat_do_memload_array(struct hat *, caddr_t, size_t,
416		    struct page **, uint_t, uint_t, uint_t);
417static void	hat_do_memload(struct hat *, caddr_t, struct page *,
418		    uint_t, uint_t, uint_t);
419static void	sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
420		    uint_t, uint_t, pgcnt_t, uint_t);
421void		sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
422			uint_t);
423static int	sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
424			uint_t, uint_t);
425static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
426					caddr_t, int, uint_t);
427static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
428			struct hmehash_bucket *, caddr_t, uint_t, uint_t,
429			uint_t);
430static int	sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
431			caddr_t, page_t **, uint_t, uint_t);
432static void	sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
433
434static int	sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
435static pfn_t	sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
436void		sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
437#ifdef VAC
438static void	sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
439static int	sfmmu_vacconflict_array(caddr_t, page_t *, int *);
440int	tst_tnc(page_t *pp, pgcnt_t);
441void	conv_tnc(page_t *pp, int);
442#endif
443
444static void	sfmmu_get_ctx(sfmmu_t *);
445static void	sfmmu_free_sfmmu(sfmmu_t *);
446
447static void	sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
448static void	sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
449
450cpuset_t	sfmmu_pageunload(page_t *, struct sf_hment *, int);
451static void	hat_pagereload(struct page *, struct page *);
452static cpuset_t	sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
453#ifdef VAC
454void	sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
455static void	sfmmu_page_cache(page_t *, int, int, int);
456#endif
457
458cpuset_t	sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
459    struct hme_blk *, int);
460static void	sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
461			pfn_t, int, int, int, int);
462static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
463			pfn_t, int);
464static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
465static void	sfmmu_tlb_range_demap(demap_range_t *);
466static void	sfmmu_invalidate_ctx(sfmmu_t *);
467static void	sfmmu_sync_mmustate(sfmmu_t *);
468
469static void 	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
470static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
471			sfmmu_t *);
472static void	sfmmu_tsb_free(struct tsb_info *);
473static void	sfmmu_tsbinfo_free(struct tsb_info *);
474static int	sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
475			sfmmu_t *);
476static void	sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
477static void	sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
478static int	sfmmu_select_tsb_szc(pgcnt_t);
479static void	sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
480#define		sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
481	sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
482#define		sfmmu_unload_tsb(sfmmup, vaddr, szc)    \
483	sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
484static void	sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
485static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
486    hatlock_t *, uint_t);
487static void	sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
488
489#ifdef VAC
490void	sfmmu_cache_flush(pfn_t, int);
491void	sfmmu_cache_flushcolor(int, pfn_t);
492#endif
493static caddr_t	sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
494			caddr_t, demap_range_t *, uint_t, int);
495
496static uint64_t	sfmmu_vtop_attr(uint_t, int mode, tte_t *);
497static uint_t	sfmmu_ptov_attr(tte_t *);
498static caddr_t	sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
499			caddr_t, demap_range_t *, uint_t);
500static uint_t	sfmmu_vtop_prot(uint_t, uint_t *);
501static int	sfmmu_idcache_constructor(void *, void *, int);
502static void	sfmmu_idcache_destructor(void *, void *);
503static int	sfmmu_hblkcache_constructor(void *, void *, int);
504static void	sfmmu_hblkcache_destructor(void *, void *);
505static void	sfmmu_hblkcache_reclaim(void *);
506static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
507			struct hmehash_bucket *);
508static void	sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
509			struct hme_blk *, struct hme_blk **, int);
510static void	sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
511			uint64_t);
512static struct hme_blk *sfmmu_check_pending_hblks(int);
513static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
514static void	sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
515static void	sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
516			int, caddr_t *);
517static void	sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
518
519static void	sfmmu_rm_large_mappings(page_t *, int);
520
521static void	hat_lock_init(void);
522static void	hat_kstat_init(void);
523static int	sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
524static void	sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
525static	int	sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
526static void	sfmmu_check_page_sizes(sfmmu_t *, int);
527int	fnd_mapping_sz(page_t *);
528static void	iment_add(struct ism_ment *,  struct hat *);
529static void	iment_sub(struct ism_ment *, struct hat *);
530static pgcnt_t	ism_tsb_entries(sfmmu_t *, int szc);
531extern void	sfmmu_setup_tsbinfo(sfmmu_t *);
532extern void	sfmmu_clear_utsbinfo(void);
533
534static void		sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
535
536extern int vpm_enable;
537
538/* kpm globals */
539#ifdef	DEBUG
540/*
541 * Enable trap level tsbmiss handling
542 */
543int	kpm_tsbmtl = 1;
544
545/*
546 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
547 * required TLB shootdowns in this case, so handle w/ care. Off by default.
548 */
549int	kpm_tlb_flush;
550#endif	/* DEBUG */
551
552static void	*sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
553
554#ifdef DEBUG
555static void	sfmmu_check_hblk_flist();
556#endif
557
558/*
559 * Semi-private sfmmu data structures.  Some of them are initialize in
560 * startup or in hat_init. Some of them are private but accessed by
561 * assembly code or mach_sfmmu.c
562 */
563struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
564struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
565uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
566uint64_t	khme_hash_pa;		/* PA of khme_hash */
567int 		uhmehash_num;		/* # of buckets in user hash table */
568int 		khmehash_num;		/* # of buckets in kernel hash table */
569
570uint_t		max_mmu_ctxdoms = 0;	/* max context domains in the system */
571mmu_ctx_t	**mmu_ctxs_tbl;		/* global array of context domains */
572uint64_t	mmu_saved_gnum = 0;	/* to init incoming MMUs' gnums */
573
574#define	DEFAULT_NUM_CTXS_PER_MMU 8192
575static uint_t	nctxs = DEFAULT_NUM_CTXS_PER_MMU;
576
577int		cache;			/* describes system cache */
578
579caddr_t		ktsb_base;		/* kernel 8k-indexed tsb base address */
580uint64_t	ktsb_pbase;		/* kernel 8k-indexed tsb phys address */
581int		ktsb_szcode;		/* kernel 8k-indexed tsb size code */
582int		ktsb_sz;		/* kernel 8k-indexed tsb size */
583
584caddr_t		ktsb4m_base;		/* kernel 4m-indexed tsb base address */
585uint64_t	ktsb4m_pbase;		/* kernel 4m-indexed tsb phys address */
586int		ktsb4m_szcode;		/* kernel 4m-indexed tsb size code */
587int		ktsb4m_sz;		/* kernel 4m-indexed tsb size */
588
589uint64_t	kpm_tsbbase;		/* kernel seg_kpm 4M TSB base address */
590int		kpm_tsbsz;		/* kernel seg_kpm 4M TSB size code */
591uint64_t	kpmsm_tsbbase;		/* kernel seg_kpm 8K TSB base address */
592int		kpmsm_tsbsz;		/* kernel seg_kpm 8K TSB size code */
593
594#ifndef sun4v
595int		utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
596int		utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
597int		dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
598caddr_t		utsb_vabase;		/* reserved kernel virtual memory */
599caddr_t		utsb4m_vabase;		/* for trap handler TSB accesses */
600#endif /* sun4v */
601uint64_t	tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
602vmem_t		*kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */
603vmem_t		*kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
604
605/*
606 * Size to use for TSB slabs.  Future platforms that support page sizes
607 * larger than 4M may wish to change these values, and provide their own
608 * assembly macros for building and decoding the TSB base register contents.
609 * Note disable_large_pages will override the value set here.
610 */
611static	uint_t tsb_slab_ttesz = TTE4M;
612size_t	tsb_slab_size = MMU_PAGESIZE4M;
613uint_t	tsb_slab_shift = MMU_PAGESHIFT4M;
614/* PFN mask for TTE */
615size_t	tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
616
617/*
618 * Size to use for TSB slabs.  These are used only when 256M tsb arenas
619 * exist.
620 */
621static uint_t	bigtsb_slab_ttesz = TTE256M;
622static size_t	bigtsb_slab_size = MMU_PAGESIZE256M;
623static uint_t	bigtsb_slab_shift = MMU_PAGESHIFT256M;
624/* 256M page alignment for 8K pfn */
625static size_t	bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
626
627/* largest TSB size to grow to, will be smaller on smaller memory systems */
628static int	tsb_max_growsize = 0;
629
630/*
631 * Tunable parameters dealing with TSB policies.
632 */
633
634/*
635 * This undocumented tunable forces all 8K TSBs to be allocated from
636 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
637 */
638#ifdef	DEBUG
639int	tsb_forceheap = 0;
640#endif	/* DEBUG */
641
642/*
643 * Decide whether to use per-lgroup arenas, or one global set of
644 * TSB arenas.  The default is not to break up per-lgroup, since
645 * most platforms don't recognize any tangible benefit from it.
646 */
647int	tsb_lgrp_affinity = 0;
648
649/*
650 * Used for growing the TSB based on the process RSS.
651 * tsb_rss_factor is based on the smallest TSB, and is
652 * shifted by the TSB size to determine if we need to grow.
653 * The default will grow the TSB if the number of TTEs for
654 * this page size exceeds 75% of the number of TSB entries,
655 * which should _almost_ eliminate all conflict misses
656 * (at the expense of using up lots and lots of memory).
657 */
658#define	TSB_RSS_FACTOR		(TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
659#define	SFMMU_RSS_TSBSIZE(tsbszc)	(tsb_rss_factor << tsbszc)
660#define	SELECT_TSB_SIZECODE(pgcnt) ( \
661	(enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
662	default_tsb_size)
663#define	TSB_OK_SHRINK()	\
664	(tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
665#define	TSB_OK_GROW()	\
666	(tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
667
668int	enable_tsb_rss_sizing = 1;
669int	tsb_rss_factor	= (int)TSB_RSS_FACTOR;
670
671/* which TSB size code to use for new address spaces or if rss sizing off */
672int default_tsb_size = TSB_8K_SZCODE;
673
674static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
675uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
676#define	TSB_ALLOC_HIWATER_FACTOR_DEFAULT	32
677
678#ifdef DEBUG
679static int tsb_random_size = 0;	/* set to 1 to test random tsb sizes on alloc */
680static int tsb_grow_stress = 0;	/* if set to 1, keep replacing TSB w/ random */
681static int tsb_alloc_mtbf = 0;	/* fail allocation every n attempts */
682static int tsb_alloc_fail_mtbf = 0;
683static int tsb_alloc_count = 0;
684#endif /* DEBUG */
685
686/* if set to 1, will remap valid TTEs when growing TSB. */
687int tsb_remap_ttes = 1;
688
689/*
690 * If we have more than this many mappings, allocate a second TSB.
691 * This default is chosen because the I/D fully associative TLBs are
692 * assumed to have at least 8 available entries. Platforms with a
693 * larger fully-associative TLB could probably override the default.
694 */
695
696#ifdef sun4v
697int tsb_sectsb_threshold = 0;
698#else
699int tsb_sectsb_threshold = 8;
700#endif
701
702/*
703 * kstat data
704 */
705struct sfmmu_global_stat sfmmu_global_stat;
706struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
707
708/*
709 * Global data
710 */
711sfmmu_t 	*ksfmmup;		/* kernel's hat id */
712
713#ifdef DEBUG
714static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
715#endif
716
717/* sfmmu locking operations */
718static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
719static int	sfmmu_mlspl_held(struct page *, int);
720
721kmutex_t *sfmmu_page_enter(page_t *);
722void	sfmmu_page_exit(kmutex_t *);
723int	sfmmu_page_spl_held(struct page *);
724
725/* sfmmu internal locking operations - accessed directly */
726static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
727				kmutex_t **, kmutex_t **);
728static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
729static hatlock_t *
730		sfmmu_hat_enter(sfmmu_t *);
731static hatlock_t *
732		sfmmu_hat_tryenter(sfmmu_t *);
733static void	sfmmu_hat_exit(hatlock_t *);
734static void	sfmmu_hat_lock_all(void);
735static void	sfmmu_hat_unlock_all(void);
736static void	sfmmu_ismhat_enter(sfmmu_t *, int);
737static void	sfmmu_ismhat_exit(sfmmu_t *, int);
738
739kpm_hlk_t	*kpmp_table;
740uint_t		kpmp_table_sz;	/* must be a power of 2 */
741uchar_t		kpmp_shift;
742
743kpm_shlk_t	*kpmp_stable;
744uint_t		kpmp_stable_sz;	/* must be a power of 2 */
745
746/*
747 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
748 * SPL_SHIFT is log2(SPL_TABLE_SIZE).
749 */
750#if ((2*NCPU_P2) > 128)
751#define	SPL_SHIFT	((unsigned)(NCPU_LOG2 + 1))
752#else
753#define	SPL_SHIFT	7U
754#endif
755#define	SPL_TABLE_SIZE	(1U << SPL_SHIFT)
756#define	SPL_MASK	(SPL_TABLE_SIZE - 1)
757
758/*
759 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
760 * and by multiples of SPL_SHIFT to get as many varied bits as we can.
761 */
762#define	SPL_INDEX(pp) \
763	((((uintptr_t)(pp) >> PP_SHIFT) ^ \
764	((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
765	((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
766	((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
767	SPL_MASK)
768
769#define	SPL_HASH(pp)    \
770	(&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
771
772static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];
773
774/* Array of mutexes protecting a page's mapping list and p_nrm field. */
775
776#define	MML_TABLE_SIZE	SPL_TABLE_SIZE
777#define	MLIST_HASH(pp)	(&mml_table[SPL_INDEX(pp)].pad_mutex)
778
779static pad_mutex_t	mml_table[MML_TABLE_SIZE];
780
781/*
782 * hat_unload_callback() will group together callbacks in order
783 * to avoid xt_sync() calls.  This is the maximum size of the group.
784 */
785#define	MAX_CB_ADDR	32
786
787tte_t	hw_tte;
788static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
789
790static char	*mmu_ctx_kstat_names[] = {
791	"mmu_ctx_tsb_exceptions",
792	"mmu_ctx_tsb_raise_exception",
793	"mmu_ctx_wrap_around",
794};
795
796/*
797 * Wrapper for vmem_xalloc since vmem_create only allows limited
798 * parameters for vm_source_alloc functions.  This function allows us
799 * to specify alignment consistent with the size of the object being
800 * allocated.
801 */
802static void *
803sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
804{
805	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
806}
807
808/* Common code for setting tsb_alloc_hiwater. */
809#define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
810		ptob(pages) / tsb_alloc_hiwater_factor
811
812/*
813 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
814 * a single TSB.  physmem is the number of physical pages so we need physmem 8K
815 * TTEs to represent all those physical pages.  We round this up by using
816 * 1<<highbit().  To figure out which size code to use, remember that the size
817 * code is just an amount to shift the smallest TSB size to get the size of
818 * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
819 * highbit() - 1) to get the size code for the smallest TSB that can represent
820 * all of physical memory, while erring on the side of too much.
821 *
822 * Restrict tsb_max_growsize to make sure that:
823 *	1) TSBs can't grow larger than the TSB slab size
824 *	2) TSBs can't grow larger than UTSB_MAX_SZCODE.
825 */
826#define	SFMMU_SET_TSB_MAX_GROWSIZE(pages) {				\
827	int	_i, _szc, _slabszc, _tsbszc;				\
828									\
829	_i = highbit(pages);						\
830	if ((1 << (_i - 1)) == (pages))					\
831		_i--;		/* 2^n case, round down */              \
832	_szc = _i - TSB_START_SIZE;					\
833	_slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
834	_tsbszc = MIN(_szc, _slabszc);                                  \
835	tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE);               \
836}
837
838/*
839 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
840 * tsb_info which handles that TTE size.
841 */
842#define	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) {			\
843	(tsbinfop) = (sfmmup)->sfmmu_tsb;				\
844	ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) ||		\
845	    sfmmu_hat_lock_held(sfmmup));				\
846	if ((tte_szc) >= TTE4M)	{					\
847		ASSERT((tsbinfop) != NULL);				\
848		(tsbinfop) = (tsbinfop)->tsb_next;			\
849	}								\
850}
851
852/*
853 * Macro to use to unload entries from the TSB.
854 * It has knowledge of which page sizes get replicated in the TSB
855 * and will call the appropriate unload routine for the appropriate size.
856 */
857#define	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat)		\
858{									\
859	int ttesz = get_hblk_ttesz(hmeblkp);				\
860	if (ttesz == TTE8K || ttesz == TTE4M) {				\
861		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
862	} else {							\
863		caddr_t sva = ismhat ? addr : 				\
864		    (caddr_t)get_hblk_base(hmeblkp);			\
865		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
866		ASSERT(addr >= sva && addr < eva);			\
867		sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);	\
868	}								\
869}
870
871
872/* Update tsb_alloc_hiwater after memory is configured. */
873/*ARGSUSED*/
874static void
875sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
876{
877	/* Assumes physmem has already been updated. */
878	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
879	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
880}
881
882/*
883 * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
884 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
885 * deleted.
886 */
887/*ARGSUSED*/
888static int
889sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
890{
891	return (0);
892}
893
894/* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
895/*ARGSUSED*/
896static void
897sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
898{
899	/*
900	 * Whether the delete was cancelled or not, just go ahead and update
901	 * tsb_alloc_hiwater and tsb_max_growsize.
902	 */
903	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
904	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
905}
906
907static kphysm_setup_vector_t sfmmu_update_vec = {
908	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
909	sfmmu_update_post_add,		/* post_add */
910	sfmmu_update_pre_del,		/* pre_del */
911	sfmmu_update_post_del		/* post_del */
912};
913
914
915/*
916 * HME_BLK HASH PRIMITIVES
917 */
918
919/*
920 * Enter a hme on the mapping list for page pp.
921 * When large pages are more prevalent in the system we might want to
922 * keep the mapping list in ascending order by the hment size. For now,
923 * small pages are more frequent, so don't slow it down.
924 */
925#define	HME_ADD(hme, pp)					\
926{								\
927	ASSERT(sfmmu_mlist_held(pp));				\
928								\
929	hme->hme_prev = NULL;					\
930	hme->hme_next = pp->p_mapping;				\
931	hme->hme_page = pp;					\
932	if (pp->p_mapping) {					\
933		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
934		ASSERT(pp->p_share > 0);			\
935	} else  {						\
936		/* EMPTY */					\
937		ASSERT(pp->p_share == 0);			\
938	}							\
939	pp->p_mapping = hme;					\
940	pp->p_share++;						\
941}
942
943/*
944 * Enter a hme on the mapping list for page pp.
945 * If we are unmapping a large translation, we need to make sure that the
946 * change is reflect in the corresponding bit of the p_index field.
947 */
948#define	HME_SUB(hme, pp)					\
949{								\
950	ASSERT(sfmmu_mlist_held(pp));				\
951	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
952								\
953	if (pp->p_mapping == NULL) {				\
954		panic("hme_remove - no mappings");		\
955	}							\
956								\
957	membar_stst();	/* ensure previous stores finish */	\
958								\
959	ASSERT(pp->p_share > 0);				\
960	pp->p_share--;						\
961								\
962	if (hme->hme_prev) {					\
963		ASSERT(pp->p_mapping != hme);			\
964		ASSERT(hme->hme_prev->hme_page == pp ||		\
965			IS_PAHME(hme->hme_prev));		\
966		hme->hme_prev->hme_next = hme->hme_next;	\
967	} else {						\
968		ASSERT(pp->p_mapping == hme);			\
969		pp->p_mapping = hme->hme_next;			\
970		ASSERT((pp->p_mapping == NULL) ?		\
971			(pp->p_share == 0) : 1);		\
972	}							\
973								\
974	if (hme->hme_next) {					\
975		ASSERT(hme->hme_next->hme_page == pp ||		\
976			IS_PAHME(hme->hme_next));		\
977		hme->hme_next->hme_prev = hme->hme_prev;	\
978	}							\
979								\
980	/* zero out the entry */				\
981	hme->hme_next = NULL;					\
982	hme->hme_prev = NULL;					\
983	hme->hme_page = NULL;					\
984								\
985	if (hme_size(hme) > TTE8K) {				\
986		/* remove mappings for remainder of large pg */	\
987		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
988	}							\
989}
990
991/*
992 * This function returns the hment given the hme_blk and a vaddr.
993 * It assumes addr has already been checked to belong to hme_blk's
994 * range.
995 */
996#define	HBLKTOHME(hment, hmeblkp, addr)					\
997{									\
998	int index;							\
999	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
1000}
1001
1002/*
1003 * Version of HBLKTOHME that also returns the index in hmeblkp
1004 * of the hment.
1005 */
1006#define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
1007{									\
1008	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
1009									\
1010	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
1011		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1012	} else								\
1013		idx = 0;						\
1014									\
1015	(hment) = &(hmeblkp)->hblk_hme[idx];				\
1016}
1017
1018/*
1019 * Disable any page sizes not supported by the CPU
1020 */
1021void
1022hat_init_pagesizes()
1023{
1024	int 		i;
1025
1026	mmu_exported_page_sizes = 0;
1027	for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1028
1029		szc_2_userszc[i] = (uint_t)-1;
1030		userszc_2_szc[i] = (uint_t)-1;
1031
1032		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1033			disable_large_pages |= (1 << i);
1034		} else {
1035			szc_2_userszc[i] = mmu_exported_page_sizes;
1036			userszc_2_szc[mmu_exported_page_sizes] = i;
1037			mmu_exported_page_sizes++;
1038		}
1039	}
1040
1041	disable_ism_large_pages |= disable_large_pages;
1042	disable_auto_data_large_pages = disable_large_pages;
1043	disable_auto_text_large_pages = disable_large_pages;
1044
1045	/*
1046	 * Initialize mmu-specific large page sizes.
1047	 */
1048	if (&mmu_large_pages_disabled) {
1049		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1050		disable_ism_large_pages |=
1051		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
1052		disable_auto_data_large_pages |=
1053		    mmu_large_pages_disabled(HAT_AUTO_DATA);
1054		disable_auto_text_large_pages |=
1055		    mmu_large_pages_disabled(HAT_AUTO_TEXT);
1056	}
1057}
1058
1059/*
1060 * Initialize the hardware address translation structures.
1061 */
1062void
1063hat_init(void)
1064{
1065	int 		i;
1066	uint_t		sz;
1067	size_t		size;
1068
1069	hat_lock_init();
1070	hat_kstat_init();
1071
1072	/*
1073	 * Hardware-only bits in a TTE
1074	 */
1075	MAKE_TTE_MASK(&hw_tte);
1076
1077	hat_init_pagesizes();
1078
1079	/* Initialize the hash locks */
1080	for (i = 0; i < khmehash_num; i++) {
1081		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1082		    MUTEX_DEFAULT, NULL);
1083		khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1084	}
1085	for (i = 0; i < uhmehash_num; i++) {
1086		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1087		    MUTEX_DEFAULT, NULL);
1088		uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1089	}
1090	khmehash_num--;		/* make sure counter starts from 0 */
1091	uhmehash_num--;		/* make sure counter starts from 0 */
1092
1093	/*
1094	 * Allocate context domain structures.
1095	 *
1096	 * A platform may choose to modify max_mmu_ctxdoms in
1097	 * set_platform_defaults(). If a platform does not define
1098	 * a set_platform_defaults() or does not choose to modify
1099	 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1100	 *
1101	 * For all platforms that have CPUs sharing MMUs, this
1102	 * value must be defined.
1103	 */
1104	if (max_mmu_ctxdoms == 0)
1105		max_mmu_ctxdoms = max_ncpus;
1106
1107	size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1108	mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1109
1110	/* mmu_ctx_t is 64 bytes aligned */
1111	mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1112	    sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1113	/*
1114	 * MMU context domain initialization for the Boot CPU.
1115	 * This needs the context domains array allocated above.
1116	 */
1117	mutex_enter(&cpu_lock);
1118	sfmmu_cpu_init(CPU);
1119	mutex_exit(&cpu_lock);
1120
1121	/*
1122	 * Intialize ism mapping list lock.
1123	 */
1124
1125	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1126
1127	/*
1128	 * Each sfmmu structure carries an array of MMU context info
1129	 * structures, one per context domain. The size of this array depends
1130	 * on the maximum number of context domains. So, the size of the
1131	 * sfmmu structure varies per platform.
1132	 *
1133	 * sfmmu is allocated from static arena, because trap
1134	 * handler at TL > 0 is not allowed to touch kernel relocatable
1135	 * memory. sfmmu's alignment is changed to 64 bytes from
1136	 * default 8 bytes, as the lower 6 bits will be used to pass
1137	 * pgcnt to vtag_flush_pgcnt_tl1.
1138	 */
1139	size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1140
1141	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1142	    64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1143	    NULL, NULL, static_arena, 0);
1144
1145	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1146	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1147
1148	/*
1149	 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1150	 * from the heap when low on memory or when TSB_FORCEALLOC is
1151	 * specified, don't use magazines to cache them--we want to return
1152	 * them to the system as quickly as possible.
1153	 */
1154	sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1155	    MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1156	    static_arena, KMC_NOMAGAZINE);
1157
1158	/*
1159	 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1160	 * memory, which corresponds to the old static reserve for TSBs.
1161	 * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
1162	 * memory we'll allocate for TSB slabs; beyond this point TSB
1163	 * allocations will be taken from the kernel heap (via
1164	 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1165	 * consumer.
1166	 */
1167	if (tsb_alloc_hiwater_factor == 0) {
1168		tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1169	}
1170	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1171
1172	for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1173		if (!(disable_large_pages & (1 << sz)))
1174			break;
1175	}
1176
1177	if (sz < tsb_slab_ttesz) {
1178		tsb_slab_ttesz = sz;
1179		tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1180		tsb_slab_size = 1 << tsb_slab_shift;
1181		tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1182		use_bigtsb_arena = 0;
1183	} else if (use_bigtsb_arena &&
1184	    (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1185		use_bigtsb_arena = 0;
1186	}
1187
1188	if (!use_bigtsb_arena) {
1189		bigtsb_slab_shift = tsb_slab_shift;
1190	}
1191	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1192
1193	/*
1194	 * On smaller memory systems, allocate TSB memory in smaller chunks
1195	 * than the default 4M slab size. We also honor disable_large_pages
1196	 * here.
1197	 *
1198	 * The trap handlers need to be patched with the final slab shift,
1199	 * since they need to be able to construct the TSB pointer at runtime.
1200	 */
1201	if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1202	    !(disable_large_pages & (1 << TTE512K))) {
1203		tsb_slab_ttesz = TTE512K;
1204		tsb_slab_shift = MMU_PAGESHIFT512K;
1205		tsb_slab_size = MMU_PAGESIZE512K;
1206		tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1207		use_bigtsb_arena = 0;
1208	}
1209
1210	if (!use_bigtsb_arena) {
1211		bigtsb_slab_ttesz = tsb_slab_ttesz;
1212		bigtsb_slab_shift = tsb_slab_shift;
1213		bigtsb_slab_size = tsb_slab_size;
1214		bigtsb_slab_mask = tsb_slab_mask;
1215	}
1216
1217
1218	/*
1219	 * Set up memory callback to update tsb_alloc_hiwater and
1220	 * tsb_max_growsize.
1221	 */
1222	i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1223	ASSERT(i == 0);
1224
1225	/*
1226	 * kmem_tsb_arena is the source from which large TSB slabs are
1227	 * drawn.  The quantum of this arena corresponds to the largest
1228	 * TSB size we can dynamically allocate for user processes.
1229	 * Currently it must also be a supported page size since we
1230	 * use exactly one translation entry to map each slab page.
1231	 *
1232	 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1233	 * which most TSBs are allocated.  Since most TSB allocations are
1234	 * typically 8K we have a kmem cache we stack on top of each
1235	 * kmem_tsb_default_arena to speed up those allocations.
1236	 *
1237	 * Note the two-level scheme of arenas is required only
1238	 * because vmem_create doesn't allow us to specify alignment
1239	 * requirements.  If this ever changes the code could be
1240	 * simplified to use only one level of arenas.
1241	 *
1242	 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1243	 * will be provided in addition to the 4M kmem_tsb_arena.
1244	 */
1245	if (use_bigtsb_arena) {
1246		kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1247		    bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1248		    vmem_xfree, heap_arena, 0, VM_SLEEP);
1249	}
1250
1251	kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1252	    sfmmu_vmem_xalloc_aligned_wrapper,
1253	    vmem_xfree, heap_arena, 0, VM_SLEEP);
1254
1255	if (tsb_lgrp_affinity) {
1256		char s[50];
1257		for (i = 0; i < NLGRPS_MAX; i++) {
1258			if (use_bigtsb_arena) {
1259				(void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1260				kmem_bigtsb_default_arena[i] = vmem_create(s,
1261				    NULL, 0, 2 * tsb_slab_size,
1262				    sfmmu_tsb_segkmem_alloc,
1263				    sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1264				    0, VM_SLEEP | VM_BESTFIT);
1265			}
1266
1267			(void) sprintf(s, "kmem_tsb_lgrp%d", i);
1268			kmem_tsb_default_arena[i] = vmem_create(s,
1269			    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1270			    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1271			    VM_SLEEP | VM_BESTFIT);
1272
1273			(void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1274			sfmmu_tsb_cache[i] = kmem_cache_create(s,
1275			    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1276			    kmem_tsb_default_arena[i], 0);
1277		}
1278	} else {
1279		if (use_bigtsb_arena) {
1280			kmem_bigtsb_default_arena[0] =
1281			    vmem_create("kmem_bigtsb_default", NULL, 0,
1282			    2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1283			    sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1284			    VM_SLEEP | VM_BESTFIT);
1285		}
1286
1287		kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1288		    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1289		    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1290		    VM_SLEEP | VM_BESTFIT);
1291		sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1292		    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1293		    kmem_tsb_default_arena[0], 0);
1294	}
1295
1296	sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1297	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1298	    sfmmu_hblkcache_destructor,
1299	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1300	    hat_memload_arena, KMC_NOHASH);
1301
1302	hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1303	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1304	    VMC_DUMPSAFE | VM_SLEEP);
1305
1306	sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1307	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1308	    sfmmu_hblkcache_destructor,
1309	    NULL, (void *)HME1BLK_SZ,
1310	    hat_memload1_arena, KMC_NOHASH);
1311
1312	pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1313	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1314
1315	ism_blk_cache = kmem_cache_create("ism_blk_cache",
1316	    sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1317	    NULL, NULL, static_arena, KMC_NOHASH);
1318
1319	ism_ment_cache = kmem_cache_create("ism_ment_cache",
1320	    sizeof (ism_ment_t), 0, NULL, NULL,
1321	    NULL, NULL, NULL, 0);
1322
1323	/*
1324	 * We grab the first hat for the kernel,
1325	 */
1326	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
1327	kas.a_hat = hat_alloc(&kas);
1328	AS_LOCK_EXIT(&kas, &kas.a_lock);
1329
1330	/*
1331	 * Initialize hblk_reserve.
1332	 */
1333	((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1334	    va_to_pa((caddr_t)hblk_reserve);
1335
1336#ifndef UTSB_PHYS
1337	/*
1338	 * Reserve some kernel virtual address space for the locked TTEs
1339	 * that allow us to probe the TSB from TL>0.
1340	 */
1341	utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1342	    0, 0, NULL, NULL, VM_SLEEP);
1343	utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1344	    0, 0, NULL, NULL, VM_SLEEP);
1345#endif
1346
1347#ifdef VAC
1348	/*
1349	 * The big page VAC handling code assumes VAC
1350	 * will not be bigger than the smallest big
1351	 * page- which is 64K.
1352	 */
1353	if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1354		cmn_err(CE_PANIC, "VAC too big!");
1355	}
1356#endif
1357
1358	(void) xhat_init();
1359
1360	uhme_hash_pa = va_to_pa(uhme_hash);
1361	khme_hash_pa = va_to_pa(khme_hash);
1362
1363	/*
1364	 * Initialize relocation locks. kpr_suspendlock is held
1365	 * at PIL_MAX to prevent interrupts from pinning the holder
1366	 * of a suspended TTE which may access it leading to a
1367	 * deadlock condition.
1368	 */
1369	mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1370	mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1371
1372	/*
1373	 * If Shared context support is disabled via /etc/system
1374	 * set shctx_on to 0 here if it was set to 1 earlier in boot
1375	 * sequence by cpu module initialization code.
1376	 */
1377	if (shctx_on && disable_shctx) {
1378		shctx_on = 0;
1379	}
1380
1381	if (shctx_on) {
1382		srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1383		    sizeof (srd_buckets[0]), KM_SLEEP);
1384		for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1385			mutex_init(&srd_buckets[i].srdb_lock, NULL,
1386			    MUTEX_DEFAULT, NULL);
1387		}
1388
1389		srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1390		    0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1391		    NULL, NULL, NULL, 0);
1392		region_cache = kmem_cache_create("region_cache",
1393		    sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1394		    sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1395		scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1396		    0, sfmmu_scdcache_constructor,  sfmmu_scdcache_destructor,
1397		    NULL, NULL, NULL, 0);
1398	}
1399
1400	/*
1401	 * Pre-allocate hrm_hashtab before enabling the collection of
1402	 * refmod statistics.  Allocating on the fly would mean us
1403	 * running the risk of suffering recursive mutex enters or
1404	 * deadlocks.
1405	 */
1406	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1407	    KM_SLEEP);
1408
1409	/* Allocate per-cpu pending freelist of hmeblks */
1410	cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1411	    KM_SLEEP);
1412	cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1413	    (uintptr_t)cpu_hme_pend, 64);
1414
1415	for (i = 0; i < NCPU; i++) {
1416		mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1417		    NULL);
1418	}
1419
1420	if (cpu_hme_pend_thresh == 0) {
1421		cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1422	}
1423}
1424
1425/*
1426 * Initialize locking for the hat layer, called early during boot.
1427 */
1428static void
1429hat_lock_init()
1430{
1431	int i;
1432
1433	/*
1434	 * initialize the array of mutexes protecting a page's mapping
1435	 * list and p_nrm field.
1436	 */
1437	for (i = 0; i < MML_TABLE_SIZE; i++)
1438		mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1439
1440	if (kpm_enable) {
1441		for (i = 0; i < kpmp_table_sz; i++) {
1442			mutex_init(&kpmp_table[i].khl_mutex, NULL,
1443			    MUTEX_DEFAULT, NULL);
1444		}
1445	}
1446
1447	/*
1448	 * Initialize array of mutex locks that protects sfmmu fields and
1449	 * TSB lists.
1450	 */
1451	for (i = 0; i < SFMMU_NUM_LOCK; i++)
1452		mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1453		    NULL);
1454}
1455
1456#define	SFMMU_KERNEL_MAXVA \
1457	(kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1458
1459/*
1460 * Allocate a hat structure.
1461 * Called when an address space first uses a hat.
1462 */
1463struct hat *
1464hat_alloc(struct as *as)
1465{
1466	sfmmu_t *sfmmup;
1467	int i;
1468	uint64_t cnum;
1469	extern uint_t get_color_start(struct as *);
1470
1471	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
1472	sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1473	sfmmup->sfmmu_as = as;
1474	sfmmup->sfmmu_flags = 0;
1475	sfmmup->sfmmu_tteflags = 0;
1476	sfmmup->sfmmu_rtteflags = 0;
1477	LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1478
1479	if (as == &kas) {
1480		ksfmmup = sfmmup;
1481		sfmmup->sfmmu_cext = 0;
1482		cnum = KCONTEXT;
1483
1484		sfmmup->sfmmu_clrstart = 0;
1485		sfmmup->sfmmu_tsb = NULL;
1486		/*
1487		 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1488		 * to setup tsb_info for ksfmmup.
1489		 */
1490	} else {
1491
1492		/*
1493		 * Just set to invalid ctx. When it faults, it will
1494		 * get a valid ctx. This would avoid the situation
1495		 * where we get a ctx, but it gets stolen and then
1496		 * we fault when we try to run and so have to get
1497		 * another ctx.
1498		 */
1499		sfmmup->sfmmu_cext = 0;
1500		cnum = INVALID_CONTEXT;
1501
1502		/* initialize original physical page coloring bin */
1503		sfmmup->sfmmu_clrstart = get_color_start(as);
1504#ifdef DEBUG
1505		if (tsb_random_size) {
1506			uint32_t randval = (uint32_t)gettick() >> 4;
1507			int size = randval % (tsb_max_growsize + 1);
1508
1509			/* chose a random tsb size for stress testing */
1510			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1511			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
1512		} else
1513#endif /* DEBUG */
1514			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1515			    default_tsb_size,
1516			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
1517		sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1518		ASSERT(sfmmup->sfmmu_tsb != NULL);
1519	}
1520
1521	ASSERT(max_mmu_ctxdoms > 0);
1522	for (i = 0; i < max_mmu_ctxdoms; i++) {
1523		sfmmup->sfmmu_ctxs[i].cnum = cnum;
1524		sfmmup->sfmmu_ctxs[i].gnum = 0;
1525	}
1526
1527	for (i = 0; i < max_mmu_page_sizes; i++) {
1528		sfmmup->sfmmu_ttecnt[i] = 0;
1529		sfmmup->sfmmu_scdrttecnt[i] = 0;
1530		sfmmup->sfmmu_ismttecnt[i] = 0;
1531		sfmmup->sfmmu_scdismttecnt[i] = 0;
1532		sfmmup->sfmmu_pgsz[i] = TTE8K;
1533	}
1534	sfmmup->sfmmu_tsb0_4minflcnt = 0;
1535	sfmmup->sfmmu_iblk = NULL;
1536	sfmmup->sfmmu_ismhat = 0;
1537	sfmmup->sfmmu_scdhat = 0;
1538	sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1539	if (sfmmup == ksfmmup) {
1540		CPUSET_ALL(sfmmup->sfmmu_cpusran);
1541	} else {
1542		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1543	}
1544	sfmmup->sfmmu_free = 0;
1545	sfmmup->sfmmu_rmstat = 0;
1546	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1547	sfmmup->sfmmu_xhat_provider = NULL;
1548	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1549	sfmmup->sfmmu_srdp = NULL;
1550	SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1551	bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1552	sfmmup->sfmmu_scdp = NULL;
1553	sfmmup->sfmmu_scd_link.next = NULL;
1554	sfmmup->sfmmu_scd_link.prev = NULL;
1555	return (sfmmup);
1556}
1557
1558/*
1559 * Create per-MMU context domain kstats for a given MMU ctx.
1560 */
1561static void
1562sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1563{
1564	mmu_ctx_stat_t	stat;
1565	kstat_t		*mmu_kstat;
1566
1567	ASSERT(MUTEX_HELD(&cpu_lock));
1568	ASSERT(mmu_ctxp->mmu_kstat == NULL);
1569
1570	mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1571	    "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1572
1573	if (mmu_kstat == NULL) {
1574		cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1575		    mmu_ctxp->mmu_idx);
1576	} else {
1577		mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1578		for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1579			kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1580			    mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1581		mmu_ctxp->mmu_kstat = mmu_kstat;
1582		kstat_install(mmu_kstat);
1583	}
1584}
1585
1586/*
1587 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1588 * context domain information for a given CPU. If a platform does not
1589 * specify that interface, then the function below is used instead to return
1590 * default information. The defaults are as follows:
1591 *
1592 *	- The number of MMU context IDs supported on any CPU in the
1593 *	  system is 8K.
1594 *	- There is one MMU context domain per CPU.
1595 */
1596/*ARGSUSED*/
1597static void
1598sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1599{
1600	infop->mmu_nctxs = nctxs;
1601	infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1602}
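
/*
 * Illustrative sketch (not part of this file): a platform that groups
 * CPUs into shared context domains could provide its own
 * plat_cpuid_to_mmu_ctx_info().  The grouping factor and context count
 * below are hypothetical:
 *
 *	void
 *	plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
 *	{
 *		infop->mmu_nctxs = MY_PLAT_NCTXS;	(hypothetical count)
 *		infop->mmu_idx = cpu[cpuid]->cpu_seqid / MY_CPUS_PER_DOM;
 *	}
 *
 * All CPUs that report the same mmu_idx then share one mmu_ctx_t.
 */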
1603
1604/*
1605 * Called during CPU initialization to set the MMU context-related information
1606 * for a CPU.
1607 *
1608 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1609 */
1610void
1611sfmmu_cpu_init(cpu_t *cp)
1612{
1613	mmu_ctx_info_t	info;
1614	mmu_ctx_t	*mmu_ctxp;
1615
1616	ASSERT(MUTEX_HELD(&cpu_lock));
1617
1618	if (&plat_cpuid_to_mmu_ctx_info == NULL)
1619		sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1620	else
1621		plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1622
1623	ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1624
1625	if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1626		/* Each mmu_ctx is cacheline aligned. */
1627		mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1628		bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1629
1630		mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1631		    (void *)ipltospl(DISP_LEVEL));
1632		mmu_ctxp->mmu_idx = info.mmu_idx;
1633		mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1634		/*
1635		 * Globally, for the lifetime of a system,
1636		 * gnum must always increase.
1637		 * mmu_saved_gnum is protected by the cpu_lock.
1638		 */
1639		mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1640		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1641
1642		sfmmu_mmu_kstat_create(mmu_ctxp);
1643
1644		mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1645	} else {
1646		ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1647		ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1648	}
1649
1650	/*
1651	 * The mmu_lock is acquired here to prevent races with
1652	 * the wrap-around code.
1653	 */
1654	mutex_enter(&mmu_ctxp->mmu_lock);
1655
1657	mmu_ctxp->mmu_ncpus++;
1658	CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1659	CPU_MMU_IDX(cp) = info.mmu_idx;
1660	CPU_MMU_CTXP(cp) = mmu_ctxp;
1661
1662	mutex_exit(&mmu_ctxp->mmu_lock);
1663}
1664
1665static void
1666sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1667{
1668	ASSERT(MUTEX_HELD(&cpu_lock));
1669	ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1670
1671	mutex_destroy(&mmu_ctxp->mmu_lock);
1672
1673	if (mmu_ctxp->mmu_kstat)
1674		kstat_delete(mmu_ctxp->mmu_kstat);
1675
1676	/* mmu_saved_gnum is protected by the cpu_lock. */
1677	if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1678		mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1679
1680	kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1681}
1682
1683/*
1684 * Called to perform MMU context-related cleanup for a CPU.
1685 */
1686void
1687sfmmu_cpu_cleanup(cpu_t *cp)
1688{
1689	mmu_ctx_t	*mmu_ctxp;
1690
1691	ASSERT(MUTEX_HELD(&cpu_lock));
1692
1693	mmu_ctxp = CPU_MMU_CTXP(cp);
1694	ASSERT(mmu_ctxp != NULL);
1695
1696	/*
1697	 * The mmu_lock is acquired here to prevent races with
1698	 * the wrap-around code.
1699	 */
1700	mutex_enter(&mmu_ctxp->mmu_lock);
1701
1702	CPU_MMU_CTXP(cp) = NULL;
1703
1704	CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1705	if (--mmu_ctxp->mmu_ncpus == 0) {
1706		mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1707		mutex_exit(&mmu_ctxp->mmu_lock);
1708		sfmmu_ctxdom_free(mmu_ctxp);
1709		return;
1710	}
1711
1712	mutex_exit(&mmu_ctxp->mmu_lock);
1713}
1714
1715uint_t
1716sfmmu_ctxdom_nctxs(int idx)
1717{
1718	return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1719}
1720
1721#ifdef sun4v
1722/*
1723 * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1724 * consistent after suspend/resume on a system that can resume on different
1725 * hardware than it was suspended on.
1726 *
1727 * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new
1728 * contexts from being allocated.  It acquires all hat_locks, which blocks
1729 * most access to context data, except for a few cases that are handled
1730 * separately or are harmless.  It wraps each domain to increment gnum and
1731 * invalidate on-CPU contexts, and forces cnum to its max.  As a result of
1732 * this call, all user threads running on CPUs trap and try to perform a
1733 * wrap-around but can't, because the hat_locks are taken.  Threads not on
1734 * CPUs but started by the scheduler go to sfmmu_alloc_ctx() to acquire a
1735 * context without checking hat_lock; they fail because cnum == nctxs, and
1736 * therefore also trap and block on hat_lock trying to wrap.
1737 * sfmmu_ctxdoms_lock() must be called before CPUs are paused, or it could deadlock on locks held by paused CPUs.
1738 *
1739 * sfmmu_ctxdoms_remove() removes context domains from every CPU and records
1740 * the CPUs that had them.  It must be called after CPUs have been paused. This
1741 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1742 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1743 * runs with interrupts disabled.  When CPUs are later resumed, they may enter
1744 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP == NULL and immediately
1745 * return failure.  Or, they will be blocked trying to acquire hat_lock. Thus
1746 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1747 * accessing the old context domains.
1748 *
1749 * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1750 * allocates new context domains based on hardware layout.  It initializes
1751 * every CPU that had a context domain before migration to have one again.
1752 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1753 * could deadlock acquiring locks held by paused CPUs.
1754 *
1755 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1756 * acquire new context ids and continue execution.
1757 *
1758 * Therefore these functions should be called in the following order:
1759 *       suspend_routine()
1760 *		sfmmu_ctxdoms_lock()
1761 *		pause_cpus()
1762 *		suspend()
1763 *			if (suspend failed)
1764 *				sfmmu_ctxdoms_unlock()
1765 *		...
1766 *		sfmmu_ctxdoms_remove()
1767 *		resume_cpus()
1768 *		sfmmu_ctxdoms_update()
1769 *		sfmmu_ctxdoms_unlock()
1770 */
1771static cpuset_t sfmmu_ctxdoms_pset;
1772
1773void
1774sfmmu_ctxdoms_remove()
1775{
1776	processorid_t	id;
1777	cpu_t		*cp;
1778
1779	/*
1780	 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1781	 * be restored post-migration. A CPU may be powered off and not have a
1782	 * domain, for example.
1783	 */
1784	CPUSET_ZERO(sfmmu_ctxdoms_pset);
1785
1786	for (id = 0; id < NCPU; id++) {
1787		if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1788			CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1789			CPU_MMU_CTXP(cp) = NULL;
1790		}
1791	}
1792}
1793
1794void
1795sfmmu_ctxdoms_lock(void)
1796{
1797	int		idx;
1798	mmu_ctx_t	*mmu_ctxp;
1799
1800	sfmmu_hat_lock_all();
1801
1802	/*
1803	 * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1804	 * hat_lock is always taken before calling it.
1805	 *
1806	 * For each domain, set mmu_cnum to max so no more contexts can be
1807	 * allocated, and wrap to flush on-CPU contexts and force threads to
1808	 * acquire a new context when we later drop hat_lock after migration.
1809	 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1810	 * but the latter uses CAS and will miscompare and not overwrite it.
1811	 */
1812	kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1813	for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1814		if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1815			mutex_enter(&mmu_ctxp->mmu_lock);
1816			mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1817			/* make sure the updated cnum is visible */
1818			membar_enter();
1819			mutex_exit(&mmu_ctxp->mmu_lock);
1820			sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1821		}
1822	}
1823	kpreempt_enable();
1824}
1825
1826void
1827sfmmu_ctxdoms_unlock(void)
1828{
1829	sfmmu_hat_unlock_all();
1830}
1831
1832void
1833sfmmu_ctxdoms_update(void)
1834{
1835	processorid_t	id;
1836	cpu_t		*cp;
1837	uint_t		idx;
1838	mmu_ctx_t	*mmu_ctxp;
1839
1840	/*
1841	 * Free all context domains.  As side effect, this increases
1842	 * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1843	 * init gnum in the new domains, which therefore will be larger than the
1844	 * sfmmu gnum for any process, guaranteeing that every process will see
1845	 * a new generation and allocate a new context regardless of what new
1846	 * domain it runs in.
1847	 */
1848	mutex_enter(&cpu_lock);
1849
1850	for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1851		if (mmu_ctxs_tbl[idx] != NULL) {
1852			mmu_ctxp = mmu_ctxs_tbl[idx];
1853			mmu_ctxs_tbl[idx] = NULL;
1854			sfmmu_ctxdom_free(mmu_ctxp);
1855		}
1856	}
1857
1858	for (id = 0; id < NCPU; id++) {
1859		if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1860		    (cp = cpu[id]) != NULL)
1861			sfmmu_cpu_init(cp);
1862	}
1863	mutex_exit(&cpu_lock);
1864}
1865#endif
1866
1867/*
1868 * hat_setup() makes an address space context the current active one.
1869 * In sfmmu this translates to setting the secondary context register
1870 * with the corresponding context number.
1871 */
1872void
1873hat_setup(struct hat *sfmmup, int allocflag)
1874{
1875	hatlock_t *hatlockp;
1876
1877	/* Init needs some special treatment. */
1878	if (allocflag == HAT_INIT) {
1879		/*
1880		 * Make sure that we have
1881		 * 1. a TSB
1882		 * 2. a valid ctx that doesn't get stolen after this point.
1883		 */
1884		hatlockp = sfmmu_hat_enter(sfmmup);
1885
1886		/*
1887		 * Swap in the TSB.  hat_init() allocates tsbinfos without
1888		 * TSBs, but we need one for init, since the kernel does some
1889		 * special things to set up its stack and needs the TSB to
1890		 * resolve page faults.
1891		 */
1892		sfmmu_tsb_swapin(sfmmup, hatlockp);
1893
1894		sfmmu_get_ctx(sfmmup);
1895
1896		sfmmu_hat_exit(hatlockp);
1897	} else {
1898		ASSERT(allocflag == HAT_ALLOC);
1899
1900		hatlockp = sfmmu_hat_enter(sfmmup);
1901		kpreempt_disable();
1902
1903		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1904		/*
1905		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter;
1906		 * the pagesize bits don't matter in this case since we are passing
1907		 * INVALID_CONTEXT to it.
1908		 * Compatibility Note: hw takes care of MMU_SCONTEXT1
1909		 */
1910		sfmmu_setctx_sec(INVALID_CONTEXT);
1911		sfmmu_clear_utsbinfo();
1912
1913		kpreempt_enable();
1914		sfmmu_hat_exit(hatlockp);
1915	}
1916}
1917
1918/*
1919 * Free all the translation resources for the specified address space.
1920 * Called from as_free when an address space is being destroyed.
1921 */
1922void
1923hat_free_start(struct hat *sfmmup)
1924{
1925	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
1926	ASSERT(sfmmup != ksfmmup);
1927	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1928
1929	sfmmup->sfmmu_free = 1;
1930	if (sfmmup->sfmmu_scdp != NULL) {
1931		sfmmu_leave_scd(sfmmup, 0);
1932	}
1933
1934	ASSERT(sfmmup->sfmmu_scdp == NULL);
1935}
1936
1937void
1938hat_free_end(struct hat *sfmmup)
1939{
1940	int i;
1941
1942	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1943	ASSERT(sfmmup->sfmmu_free == 1);
1944	ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1945	ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1946	ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1947	ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1948	ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1949	ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1950
1951	if (sfmmup->sfmmu_rmstat) {
1952		hat_freestat(sfmmup->sfmmu_as, NULL);
1953	}
1954
1955	while (sfmmup->sfmmu_tsb != NULL) {
1956		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1957		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1958		sfmmup->sfmmu_tsb = next;
1959	}
1960
1961	if (sfmmup->sfmmu_srdp != NULL) {
1962		sfmmu_leave_srd(sfmmup);
1963		ASSERT(sfmmup->sfmmu_srdp == NULL);
1964		for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1965			if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1966				kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1967				    SFMMU_L2_HMERLINKS_SIZE);
1968				sfmmup->sfmmu_hmeregion_links[i] = NULL;
1969			}
1970		}
1971	}
1972	sfmmu_free_sfmmu(sfmmup);
1973
1974#ifdef DEBUG
1975	for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1976		ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1977	}
1978#endif
1979
1980	kmem_cache_free(sfmmuid_cache, sfmmup);
1981}
1982
1983/*
1984 * Set up any translation structures, for the specified address space,
1985 * that are needed or preferred when the process is being swapped in.
1986 */
1987/* ARGSUSED */
1988void
1989hat_swapin(struct hat *hat)
1990{
1991	ASSERT(hat->sfmmu_xhat_provider == NULL);
1992}
1993
1994/*
1995 * Free all of the translation resources, for the specified address space,
1996 * that can be freed while the process is swapped out. Called from as_swapout.
1997 * Also, free up the ctx that this process was using.
1998 */
1999void
2000hat_swapout(struct hat *sfmmup)
2001{
2002	struct hmehash_bucket *hmebp;
2003	struct hme_blk *hmeblkp;
2004	struct hme_blk *pr_hblk = NULL;
2005	struct hme_blk *nx_hblk;
2006	int i;
2007	struct hme_blk *list = NULL;
2008	hatlock_t *hatlockp;
2009	struct tsb_info *tsbinfop;
2010	struct free_tsb {
2011		struct free_tsb *next;
2012		struct tsb_info *tsbinfop;
2013	};			/* free list of TSBs */
2014	struct free_tsb *freelist, *last, *next;
2015
2016	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
2017	SFMMU_STAT(sf_swapout);
2018
2019	/*
2020	 * There is no way to go from an as to all its translations in sfmmu.
2021	 * Here is one of the times when we take the big hit and traverse
2022	 * the hash looking for hme_blks to free up.  Not only do we free up
2023	 * this as's hme_blks, but also any others that are free.  We are
2024	 * obviously swapping because we need memory, so let's free up as much
2025	 * as we can.
2026	 *
2027	 * Note that we don't flush TLB/TSB here -- it's not necessary
2028	 * because:
2029	 *  1) we free the ctx we're using and throw away the TSB(s);
2030	 *  2) processes aren't runnable while being swapped out.
2031	 */
2032	ASSERT(sfmmup != KHATID);
2033	for (i = 0; i <= UHMEHASH_SZ; i++) {
2034		hmebp = &uhme_hash[i];
2035		SFMMU_HASH_LOCK(hmebp);
2036		hmeblkp = hmebp->hmeblkp;
2037		pr_hblk = NULL;
2038		while (hmeblkp) {
2039
2040			ASSERT(!hmeblkp->hblk_xhat_bit);
2041
2042			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2043			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2044				ASSERT(!hmeblkp->hblk_shared);
2045				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2046				    (caddr_t)get_hblk_base(hmeblkp),
2047				    get_hblk_endaddr(hmeblkp),
2048				    NULL, HAT_UNLOAD);
2049			}
2050			nx_hblk = hmeblkp->hblk_next;
2051			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2052				ASSERT(!hmeblkp->hblk_lckcnt);
2053				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2054				    &list, 0);
2055			} else {
2056				pr_hblk = hmeblkp;
2057			}
2058			hmeblkp = nx_hblk;
2059		}
2060		SFMMU_HASH_UNLOCK(hmebp);
2061	}
2062
2063	sfmmu_hblks_list_purge(&list, 0);
2064
2065	/*
2066	 * Now free up the ctx so that others can reuse it.
2067	 */
2068	hatlockp = sfmmu_hat_enter(sfmmup);
2069
2070	sfmmu_invalidate_ctx(sfmmup);
2071
2072	/*
2073	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2074	 * If TSBs were never swapped in, just return.
2075	 * This implies that we don't support partial swapping
2076	 * of TSBs -- either all are swapped out, or none are.
2077	 *
2078	 * We must hold the HAT lock here to prevent racing with another
2079	 * thread trying to unmap TTEs from the TSB or running the post-
2080	 * relocator after relocating the TSB's memory.  Unfortunately, we
2081	 * can't free memory while holding the HAT lock or we could
2082	 * deadlock, so we build a list of TSBs to be freed after marking
2083	 * the tsbinfos as swapped out and free them after dropping the
2084	 * lock.
2085	 */
2086	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2087		sfmmu_hat_exit(hatlockp);
2088		return;
2089	}
2090
2091	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2092	last = freelist = NULL;
2093	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2094	    tsbinfop = tsbinfop->tsb_next) {
2095		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2096
2097		/*
2098		 * Cast the TSB into a struct free_tsb and put it on the free
2099		 * list.
2100		 */
2101		if (freelist == NULL) {
2102			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2103		} else {
2104			last->next = (struct free_tsb *)tsbinfop->tsb_va;
2105			last = last->next;
2106		}
2107		last->next = NULL;
2108		last->tsbinfop = tsbinfop;
2109		tsbinfop->tsb_flags |= TSB_SWAPPED;
2110		/*
2111		 * Zero out the TTE to clear the valid bit.
2112		 * Note we can't use a value like 0xbad because we want to
2113		 * ensure diagnostic bits are NEVER set on TTEs that might
2114		 * be loaded.  The intent is to catch any invalid access
2115		 * to the swapped TSB, such as a thread running with a valid
2116		 * context without first calling sfmmu_tsb_swapin() to
2117		 * allocate TSB memory.
2118		 */
2119		tsbinfop->tsb_tte.ll = 0;
2120	}
2121
2122	/* Now we can drop the lock and free the TSB memory. */
2123	sfmmu_hat_exit(hatlockp);
2124	for (; freelist != NULL; freelist = next) {
2125		next = freelist->next;
2126		sfmmu_tsb_free(freelist->tsbinfop);
2127	}
2128}
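
/*
 * A note on the free_tsb trick used above: the TSB memory that is about
 * to be freed temporarily doubles as the free list linkage, so no
 * allocation is needed while the HAT lock is held.  A minimal sketch of
 * the same pattern (hypothetical names, for illustration only):
 *
 *	struct node { struct node *next; void *meta; };
 *	struct node *head = NULL, *last = NULL, *next;
 *
 *	(with the lock held, for each buffer buf to be freed)
 *	struct node *n = (struct node *)buf;
 *	n->next = NULL;
 *	n->meta = buf_metadata;
 *	if (head == NULL)
 *		head = last = n;
 *	else
 *		last = last->next = n;
 *
 *	(after dropping the lock; read next before freeing the node memory)
 *	for (; head != NULL; head = next) {
 *		next = head->next;
 *		really_free(head->meta);
 *	}
 */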
2129
2130/*
2131 * Duplicate the translations of an as into another as (newas).
2132 */
2133/* ARGSUSED */
2134int
2135hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2136	uint_t flag)
2137{
2138	sf_srd_t *srdp;
2139	sf_scd_t *scdp;
2140	int i;
2141	extern uint_t get_color_start(struct as *);
2142
2143	ASSERT(hat->sfmmu_xhat_provider == NULL);
2144	ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2145	    (flag == HAT_DUP_SRD));
2146	ASSERT(hat != ksfmmup);
2147	ASSERT(newhat != ksfmmup);
2148	ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2149
2150	if (flag == HAT_DUP_COW) {
2151		panic("hat_dup: HAT_DUP_COW not supported");
2152	}
2153
2154	if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2155		ASSERT(srdp->srd_evp != NULL);
2156		VN_HOLD(srdp->srd_evp);
2157		ASSERT(srdp->srd_refcnt > 0);
2158		newhat->sfmmu_srdp = srdp;
2159		atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
2160	}
2161
2162	/*
2163	 * HAT_DUP_ALL flag is used after as duplication is done.
2164	 */
2165	if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2166		ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2167		newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2168		if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2169			newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2170		}
2171
2172		/* check if need to join scd */
2173		if ((scdp = hat->sfmmu_scdp) != NULL &&
2174		    newhat->sfmmu_scdp != scdp) {
2175			int ret;
2176			SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2177			    &scdp->scd_region_map, ret);
2178			ASSERT(ret);
2179			sfmmu_join_scd(scdp, newhat);
2180			ASSERT(newhat->sfmmu_scdp == scdp &&
2181			    scdp->scd_refcnt >= 2);
2182			for (i = 0; i < max_mmu_page_sizes; i++) {
2183				newhat->sfmmu_ismttecnt[i] =
2184				    hat->sfmmu_ismttecnt[i];
2185				newhat->sfmmu_scdismttecnt[i] =
2186				    hat->sfmmu_scdismttecnt[i];
2187			}
2188		}
2189
2190		sfmmu_check_page_sizes(newhat, 1);
2191	}
2192
2193	if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2194	    update_proc_pgcolorbase_after_fork != 0) {
2195		hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2196	}
2197	return (0);
2198}
2199
2200void
2201hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2202	uint_t attr, uint_t flags)
2203{
2204	hat_do_memload(hat, addr, pp, attr, flags,
2205	    SFMMU_INVALID_SHMERID);
2206}
2207
2208void
2209hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2210	uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2211{
2212	uint_t rid;
2213	if (rcookie == HAT_INVALID_REGION_COOKIE ||
2214	    hat->sfmmu_xhat_provider != NULL) {
2215		hat_do_memload(hat, addr, pp, attr, flags,
2216		    SFMMU_INVALID_SHMERID);
2217		return;
2218	}
2219	rid = (uint_t)((uint64_t)rcookie);
2220	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2221	hat_do_memload(hat, addr, pp, attr, flags, rid);
2222}
2223
2224/*
2225 * Set up addr to map to page pp with protection prot.
2226 * As an optimization we also load the TSB with the
2227 * corresponding tte, but it is no big deal if the tte gets kicked out.
2228 */
2229static void
2230hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2231	uint_t attr, uint_t flags, uint_t rid)
2232{
2233	tte_t tte;
2234
2236	ASSERT(hat != NULL);
2237	ASSERT(PAGE_LOCKED(pp));
2238	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2239	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2240	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2241	SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2242
2243	if (PP_ISFREE(pp)) {
2244		panic("hat_memload: loading a mapping to free page %p",
2245		    (void *)pp);
2246	}
2247
2248	if (hat->sfmmu_xhat_provider) {
2249		/* no regions for xhats */
2250		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2251		XHAT_MEMLOAD(hat, addr, pp, attr, flags);
2252		return;
2253	}
2254
2255	ASSERT((hat == ksfmmup) ||
2256	    AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2257
2258	if (flags & ~SFMMU_LOAD_ALLFLAG)
2259		cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2260		    flags & ~SFMMU_LOAD_ALLFLAG);
2261
2262	if (hat->sfmmu_rmstat)
2263		hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2264
2265#if defined(SF_ERRATA_57)
2266	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2267	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
2268	    !(flags & HAT_LOAD_SHARE)) {
2269		cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2270		    " page executable");
2271		attr &= ~PROT_EXEC;
2272	}
2273#endif
2274
2275	sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2276	(void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2277
2278	/*
2279	 * Check TSB and TLB page sizes.
2280	 */
2281	if ((flags & HAT_LOAD_SHARE) == 0) {
2282		sfmmu_check_page_sizes(hat, 1);
2283	}
2284}
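
/*
 * Typical usage (illustrative; the exact attributes depend on the
 * caller): a subsystem holding a locked page can wire an 8K kernel
 * mapping with something like
 *
 *	hat_memload(kas.a_hat, va, pp,
 *	    PROT_READ | PROT_WRITE | HAT_NOSYNC, HAT_LOAD_LOCK);
 *
 * which arrives here with rid == SFMMU_INVALID_SHMERID.
 */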
2285
2286/*
2287 * hat_devload can be called to map real memory (e.g.
2288 * /dev/kmem) and even though hat_devload will determine the pfn
2289 * is for memory, it will be unable to get a shared lock on the
2290 * page (because someone else has it exclusively) and will
2291 * pass pp = NULL.  If tteload doesn't get a non-NULL
2292 * page pointer it can't cache memory.
2293 */
2294void
2295hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2296	uint_t attr, int flags)
2297{
2298	tte_t tte;
2299	struct page *pp = NULL;
2300	int use_lgpg = 0;
2301
2302	ASSERT(hat != NULL);
2303
2304	if (hat->sfmmu_xhat_provider) {
2305		XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
2306		return;
2307	}
2308
2309	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2310	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2311	ASSERT((hat == ksfmmup) ||
2312	    AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2313	if (len == 0)
2314		panic("hat_devload: zero len");
2315	if (flags & ~SFMMU_LOAD_ALLFLAG)
2316		cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2317		    flags & ~SFMMU_LOAD_ALLFLAG);
2318
2319#if defined(SF_ERRATA_57)
2320	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2321	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
2322	    !(flags & HAT_LOAD_SHARE)) {
2323		cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2324		    " page executable");
2325		attr &= ~PROT_EXEC;
2326	}
2327#endif
2328
2329	/*
2330	 * If it's a memory page find its pp
2331	 */
2332	if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2333		pp = page_numtopp_nolock(pfn);
2334		if (pp == NULL) {
2335			flags |= HAT_LOAD_NOCONSIST;
2336		} else {
2337			if (PP_ISFREE(pp)) {
2338				panic("hat_devload: loading "
2339				    "a mapping to free page %p",
2340				    (void *)pp);
2341			}
2342			if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2343				panic("hat_devload: loading a mapping "
2344				    "to unlocked relocatable page %p",
2345				    (void *)pp);
2346			}
2347			ASSERT(len == MMU_PAGESIZE);
2348		}
2349	}
2350
2351	if (hat->sfmmu_rmstat)
2352		hat_resvstat(len, hat->sfmmu_as, addr);
2353
2354	if (flags & HAT_LOAD_NOCONSIST) {
2355		attr |= SFMMU_UNCACHEVTTE;
2356		use_lgpg = 1;
2357	}
2358	if (!pf_is_memory(pfn)) {
2359		attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2360		use_lgpg = 1;
2361		switch (attr & HAT_ORDER_MASK) {
2362			case HAT_STRICTORDER:
2363			case HAT_UNORDERED_OK:
2364				/*
2365				 * we set the side effect bit for all non
2366				 * memory mappings unless merging is ok
2367				 */
2368				attr |= SFMMU_SIDEFFECT;
2369				break;
2370			case HAT_MERGING_OK:
2371			case HAT_LOADCACHING_OK:
2372			case HAT_STORECACHING_OK:
2373				break;
2374			default:
2375				panic("hat_devload: bad attr");
2376				break;
2377		}
2378	}
2379	while (len) {
2380		if (!use_lgpg) {
2381			sfmmu_memtte(&tte, pfn, attr, TTE8K);
2382			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2383			    flags, SFMMU_INVALID_SHMERID);
2384			len -= MMU_PAGESIZE;
2385			addr += MMU_PAGESIZE;
2386			pfn++;
2387			continue;
2388		}
2389		/*
2390		 *  try to use large pages, check va/pa alignments
2391		 *  Note that 32M/256M page sizes are not (yet) supported.
2392		 */
2393		if ((len >= MMU_PAGESIZE4M) &&
2394		    !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2395		    !(disable_large_pages & (1 << TTE4M)) &&
2396		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2397			sfmmu_memtte(&tte, pfn, attr, TTE4M);
2398			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2399			    flags, SFMMU_INVALID_SHMERID);
2400			len -= MMU_PAGESIZE4M;
2401			addr += MMU_PAGESIZE4M;
2402			pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2403		} else if ((len >= MMU_PAGESIZE512K) &&
2404		    !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2405		    !(disable_large_pages & (1 << TTE512K)) &&
2406		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2407			sfmmu_memtte(&tte, pfn, attr, TTE512K);
2408			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2409			    flags, SFMMU_INVALID_SHMERID);
2410			len -= MMU_PAGESIZE512K;
2411			addr += MMU_PAGESIZE512K;
2412			pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2413		} else if ((len >= MMU_PAGESIZE64K) &&
2414		    !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2415		    !(disable_large_pages & (1 << TTE64K)) &&
2416		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2417			sfmmu_memtte(&tte, pfn, attr, TTE64K);
2418			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2419			    flags, SFMMU_INVALID_SHMERID);
2420			len -= MMU_PAGESIZE64K;
2421			addr += MMU_PAGESIZE64K;
2422			pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2423		} else {
2424			sfmmu_memtte(&tte, pfn, attr, TTE8K);
2425			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2426			    flags, SFMMU_INVALID_SHMERID);
2427			len -= MMU_PAGESIZE;
2428			addr += MMU_PAGESIZE;
2429			pfn++;
2430		}
2431	}
2432
2433	/*
2434	 * Check TSB and TLB page sizes.
2435	 */
2436	if ((flags & HAT_LOAD_SHARE) == 0) {
2437		sfmmu_check_page_sizes(hat, 1);
2438	}
2439}
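
/*
 * The mapping loop above is an instance of a common pattern: choose the
 * largest page size for which the virtual address, the physical address
 * and the remaining length all qualify.  A condensed sketch of the
 * selection step (illustrative only; 32M/256M omitted, as above):
 *
 *	static int
 *	pick_ttesz(caddr_t addr, pfn_t pfn, size_t len)
 *	{
 *		int sz;
 *
 *		for (sz = TTE4M; sz > TTE8K; sz--) {
 *			size_t pgsz = TTEBYTES(sz);
 *
 *			if (len >= pgsz &&
 *			    IS_P2ALIGNED((uintptr_t)addr, pgsz) &&
 *			    IS_P2ALIGNED(mmu_ptob(pfn), pgsz) &&
 *			    !(disable_large_pages & (1 << sz)))
 *				return (sz);
 *		}
 *		return (TTE8K);
 *	}
 */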
2440
2441void
2442hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2443	struct page **pps, uint_t attr, uint_t flags)
2444{
2445	hat_do_memload_array(hat, addr, len, pps, attr, flags,
2446	    SFMMU_INVALID_SHMERID);
2447}
2448
2449void
2450hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2451	struct page **pps, uint_t attr, uint_t flags,
2452	hat_region_cookie_t rcookie)
2453{
2454	uint_t rid;
2455	if (rcookie == HAT_INVALID_REGION_COOKIE ||
2456	    hat->sfmmu_xhat_provider != NULL) {
2457		hat_do_memload_array(hat, addr, len, pps, attr, flags,
2458		    SFMMU_INVALID_SHMERID);
2459		return;
2460	}
2461	rid = (uint_t)((uint64_t)rcookie);
2462	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2463	hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2464}
2465
2466/*
2467 * Map the largest extent possible out of the page array.  The array may NOT
2468 * be in order.  The largest possible mapping a page can have
2469 * is specified in the p_szc field.  The p_szc field
2470 * cannot change as long as there are any mappings (large or small)
2471 * to any of the pages that make up the large page.  (I.e., any
2472 * promotion/demotion of page size is not up to the hat but up to
2473 * the page free list manager.)  The array
2474 * should consist of properly aligned contiguous pages that are
2475 * part of a big page for a large mapping to be created.
2476 */
2477static void
2478hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2479	struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2480{
2481	int  ttesz;
2482	size_t mapsz;
2483	pgcnt_t	numpg, npgs;
2484	tte_t tte;
2485	page_t *pp;
2486	uint_t large_pages_disable;
2487
2488	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2489	SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2490
2491	if (hat->sfmmu_xhat_provider) {
2492		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2493		XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
2494		return;
2495	}
2496
2497	if (hat->sfmmu_rmstat)
2498		hat_resvstat(len, hat->sfmmu_as, addr);
2499
2500#if defined(SF_ERRATA_57)
2501	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2502	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
2503	    !(flags & HAT_LOAD_SHARE)) {
2504		cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2505		    "user page executable");
2506		attr &= ~PROT_EXEC;
2507	}
2508#endif
2509
2510	/* Get number of pages */
2511	npgs = len >> MMU_PAGESHIFT;
2512
2513	if (flags & HAT_LOAD_SHARE) {
2514		large_pages_disable = disable_ism_large_pages;
2515	} else {
2516		large_pages_disable = disable_large_pages;
2517	}
2518
2519	if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2520		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2521		    rid);
2522		return;
2523	}
2524
2525	while (npgs >= NHMENTS) {
2526		pp = *pps;
2527		for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2528			/*
2529			 * Check if this page size is disabled.
2530			 */
2531			if (large_pages_disable & (1 << ttesz))
2532				continue;
2533
2534			numpg = TTEPAGES(ttesz);
2535			mapsz = numpg << MMU_PAGESHIFT;
2536			if ((npgs >= numpg) &&
2537			    IS_P2ALIGNED(addr, mapsz) &&
2538			    IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2539				/*
2540				 * At this point we have enough pages and
2541				 * we know the virtual address and the pfn
2542				 * are properly aligned.  We still need
2543				 * to check for physical contiguity but since
2544				 * it is very likely that this is the case
2545				 * we will assume they are so and undo
2546				 * the request if necessary.  It would
2547				 * be great if we could get a hint flag
2548				 * like HAT_CONTIG which would tell us
2549				 * the pages are contiguous for sure.
2550				 */
2551				sfmmu_memtte(&tte, (*pps)->p_pagenum,
2552				    attr, ttesz);
2553				if (!sfmmu_tteload_array(hat, &tte, addr,
2554				    pps, flags, rid)) {
2555					break;
2556				}
2557			}
2558		}
2559		if (ttesz == TTE8K) {
2560			/*
2561			 * We were not able to map the array using a large
2562			 * page; batch it a hmeblk (or fraction) at a time.
2563			 */
2564			numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2565			    & (NHMENTS-1);
2566			numpg = NHMENTS - numpg;
2567			ASSERT(numpg <= npgs);
2568			mapsz = numpg * MMU_PAGESIZE;
2569			sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2570			    numpg, rid);
2571		}
2572		addr += mapsz;
2573		npgs -= numpg;
2574		pps += numpg;
2575	}
2576
2577	if (npgs) {
2578		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2579		    rid);
2580	}
2581
2582	/*
2583	 * Check TSB and TLB page sizes.
2584	 */
2585	if ((flags & HAT_LOAD_SHARE) == 0) {
2586		sfmmu_check_page_sizes(hat, 1);
2587	}
2588}
2589
2590/*
2591 * Function tries to batch 8K pages into the same hme blk.
2592 */
2593static void
2594sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2595		    uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2596{
2597	tte_t	tte;
2598	page_t *pp;
2599	struct hmehash_bucket *hmebp;
2600	struct hme_blk *hmeblkp;
2601	int	index;
2602
2603	while (npgs) {
2604		/*
2605		 * Acquire the hash bucket.
2606		 */
2607		hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2608		    rid);
2609		ASSERT(hmebp);
2610
2611		/*
2612		 * Find the hment block.
2613		 */
2614		hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2615		    TTE8K, flags, rid);
2616		ASSERT(hmeblkp);
2617
2618		do {
2619			/*
2620			 * Make the tte.
2621			 */
2622			pp = *pps;
2623			sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2624
2625			/*
2626			 * Add the translation.
2627			 */
2628			(void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2629			    vaddr, pps, flags, rid);
2630
2631			/*
2632			 * Goto next page.
2633			 */
2634			pps++;
2635			npgs--;
2636
2637			/*
2638			 * Goto next address.
2639			 */
2640			vaddr += MMU_PAGESIZE;
2641
2642			/*
2643			 * Don't cross over into a different hmeblk.
2644			 */
2645			index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2646			    (NHMENTS-1));
2647
2648		} while (index != 0 && npgs != 0);
2649
2650		/*
2651		 * Release the hash bucket.
2652		 */
2653
2654		sfmmu_tteload_release_hashbucket(hmebp);
2655	}
2656}
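
/*
 * For example (assuming NHMENTS == 8): a call whose starting vaddr has
 * 8K-page index 5 within its hmeblk first loads three ttes (indices
 * 5..7); the index then wraps to 0, the bucket is dropped and
 * reacquired, and subsequent inner loops proceed a full hmeblk
 * (8 pages) at a time until fewer than that remain.
 */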
2657
2658/*
2659 * Construct a tte for a page:
2660 *
2661 * tte_valid = 1
2662 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2663 * tte_size = size
2664 * tte_nfo = attr & HAT_NOFAULT
2665 * tte_ie = attr & HAT_STRUCTURE_LE
2666 * tte_hmenum = hmenum
2667 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2668 * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2669 * tte_ref = 1 (optimization)
2670 * tte_wr_perm = attr & PROT_WRITE;
2671 * tte_no_sync = attr & HAT_NOSYNC
2672 * tte_lock = attr & SFMMU_LOCKTTE
2673 * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2674 * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2675 * tte_e = attr & SFMMU_SIDEFFECT
2676 * tte_priv = !(attr & PROT_USER)
2677 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2678 * tte_glb = 0
2679 */
2680void
2681sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2682{
2683	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2684
2685	ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2686	ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2687
2688	if (TTE_IS_NOSYNC(ttep)) {
2689		TTE_SET_REF(ttep);
2690		if (TTE_IS_WRITABLE(ttep)) {
2691			TTE_SET_MOD(ttep);
2692		}
2693	}
2694	if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2695		panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2696	}
2697}
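
/*
 * Example (illustrative): building an 8K, privileged, writable, nosync
 * tte for page frame pfn.  Per the rules above, HAT_NOSYNC on a
 * writable tte presets both the ref and mod bits:
 *
 *	tte_t tte;
 *
 *	sfmmu_memtte(&tte, pfn, PROT_READ | PROT_WRITE | HAT_NOSYNC, TTE8K);
 *	ASSERT(TTE_IS_REF(&tte) && TTE_IS_MOD(&tte));
 */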
2698
2699/*
2700 * This function will add a translation to the hme_blk and allocate the
2701 * hme_blk if one does not exist.
2702 * If a page structure is specified then it will add the
2703 * corresponding hment to the mapping list.
2704 * It will also update the hmenum field for the tte.
2705 *
2706 * Currently this function is only used for kernel mappings,
2707 * so we pass an invalid region to sfmmu_tteload_array().
2708 */
2709void
2710sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2711	uint_t flags)
2712{
2713	ASSERT(sfmmup == ksfmmup);
2714	(void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2715	    SFMMU_INVALID_SHMERID);
2716}
2717
2718/*
2719 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2720 * Assumes that a particular page size may only be resident in one TSB.
2721 */
2722static void
2723sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2724{
2725	struct tsb_info *tsbinfop = NULL;
2726	uint64_t tag;
2727	struct tsbe *tsbe_addr;
2728	uint64_t tsb_base;
2729	uint_t tsb_size;
2730	int vpshift = MMU_PAGESHIFT;
2731	int phys = 0;
2732
2733	if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2734		phys = ktsb_phys;
2735		if (ttesz >= TTE4M) {
2736#ifndef sun4v
2737			ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2738#endif
2739			tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2740			tsb_size = ktsb4m_szcode;
2741		} else {
2742			tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2743			tsb_size = ktsb_szcode;
2744		}
2745	} else {
2746		SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2747
2748		/*
2749		 * If there isn't a TSB for this page size, or the TSB is
2750		 * swapped out, there is nothing to do.  Note that the latter
2751		 * case seems impossible but can occur if hat_pageunload()
2752		 * is called on an ISM mapping while the process is swapped
2753		 * out.
2754		 */
2755		if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2756			return;
2757
2758		/*
2759		 * If another thread is in the middle of relocating a TSB
2760		 * we can't unload the entry so set a flag so that the
2761		 * TSB will be flushed before it can be accessed by the
2762		 * process.
2763		 */
2764		if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2765			if (ttep == NULL)
2766				tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2767			return;
2768		}
2769#if defined(UTSB_PHYS)
2770		phys = 1;
2771		tsb_base = (uint64_t)tsbinfop->tsb_pa;
2772#else
2773		tsb_base = (uint64_t)tsbinfop->tsb_va;
2774#endif
2775		tsb_size = tsbinfop->tsb_szc;
2776	}
2777	if (ttesz >= TTE4M)
2778		vpshift = MMU_PAGESHIFT4M;
2779
2780	tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2781	tag = sfmmu_make_tsbtag(vaddr);
2782
2783	if (ttep == NULL) {
2784		sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2785	} else {
2786		if (ttesz >= TTE4M) {
2787			SFMMU_STAT(sf_tsb_load4m);
2788		} else {
2789			SFMMU_STAT(sf_tsb_load8k);
2790		}
2791
2792		sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2793	}
2794}
2795
2796/*
2797 * Unmap all entries from [start, end) matching the given page size.
2798 *
2799 * This function is used primarily to unmap replicated 64K or 512K entries
2800 * from the TSB that are inserted using the base page size TSB pointer, but
2801 * it may also be called to unmap a range of addresses from the TSB.
2802 */
2803void
2804sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2805{
2806	struct tsb_info *tsbinfop;
2807	uint64_t tag;
2808	struct tsbe *tsbe_addr;
2809	caddr_t vaddr;
2810	uint64_t tsb_base;
2811	int vpshift, vpgsz;
2812	uint_t tsb_size;
2813	int phys = 0;
2814
2815	/*
2816	 * Assumptions:
2817	 *  If ttesz == 8K, 64K or 512K, we walk through the range 8K
2818	 *  at a time shooting down any valid entries we encounter.
2819	 *
2820	 *  If ttesz >= 4M we walk the range 4M at a time shooting
2821	 *  down any valid mappings we find.
2822	 */
2823	if (sfmmup == ksfmmup) {
2824		phys = ktsb_phys;
2825		if (ttesz >= TTE4M) {
2826#ifndef sun4v
2827			ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2828#endif
2829			tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2830			tsb_size = ktsb4m_szcode;
2831		} else {
2832			tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2833			tsb_size = ktsb_szcode;
2834		}
2835	} else {
2836		SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2837
2838		/*
2839		 * If there isn't a TSB for this page size, or the TSB is
2840		 * swapped out, there is nothing to do.  Note that the latter
2841		 * case seems impossible but can occur if hat_pageunload()
2842		 * is called on an ISM mapping while the process is swapped
2843		 * out.
2844		 */
2845		if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2846			return;
2847
2848		/*
2849		 * If another thread is in the middle of relocating a TSB
2850		 * we can't unload the entry so set a flag so that the
2851		 * TSB will be flushed before it can be accessed by the
2852		 * process.
2853		 */
2854		if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2855			tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2856			return;
2857		}
2858#if defined(UTSB_PHYS)
2859		phys = 1;
2860		tsb_base = (uint64_t)tsbinfop->tsb_pa;
2861#else
2862		tsb_base = (uint64_t)tsbinfop->tsb_va;
2863#endif
2864		tsb_size = tsbinfop->tsb_szc;
2865	}
2866	if (ttesz >= TTE4M) {
2867		vpshift = MMU_PAGESHIFT4M;
2868		vpgsz = MMU_PAGESIZE4M;
2869	} else {
2870		vpshift = MMU_PAGESHIFT;
2871		vpgsz = MMU_PAGESIZE;
2872	}
2873
2874	for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2875		tag = sfmmu_make_tsbtag(vaddr);
2876		tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2877		sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2878	}
2879}
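
/*
 * For example, a 64K mapping entered through the base page size TSB
 * pointer is replicated as eight 8K-indexed entries, so
 *
 *	sfmmu_unload_tsb_range(sfmmup, va, va + MMU_PAGESIZE64K, TTE64K);
 *
 * probes and invalidates eight consecutive 8K-tagged slots.
 */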
2880
2881/*
2882 * Select the optimum TSB size given the number of mappings
2883 * that need to be cached.
2884 */
2885static int
2886sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2887{
2888	int szc = 0;
2889
2890#ifdef DEBUG
2891	if (tsb_grow_stress) {
2892		uint32_t randval = (uint32_t)gettick() >> 4;
2893		return (randval % (tsb_max_growsize + 1));
2894	}
2895#endif	/* DEBUG */
2896
2897	while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2898		szc++;
2899	return (szc);
2900}
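
/*
 * In effect the chosen code is the smallest one (capped at
 * tsb_max_growsize) whose TSB can cache pgcnt mappings.  Since each
 * increment of szc doubles the TSB, if SFMMU_RSS_TSBSIZE(0) were 2048
 * (a hypothetical value), a pgcnt of 5000 would yield szc == 2:
 * 5000 > 2048, 5000 > 4096, 5000 <= 8192.
 */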
2901
2902/*
2903 * This function will add a translation to the hme_blk and allocate the
2904 * hme_blk if one does not exist.
2905 * If a page structure is specified then it will add the
2906 * corresponding hment to the mapping list.
2907 * It will also update the hmenum field for the tte.
2908 * Furthermore, it attempts to create a large page translation
2909 * for <addr,hat> at page array pps.  It assumes addr and the first
2910 * pp are correctly aligned.  It returns 0 if successful and 1 otherwise.
2911 */
2912static int
2913sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2914	page_t **pps, uint_t flags, uint_t rid)
2915{
2916	struct hmehash_bucket *hmebp;
2917	struct hme_blk *hmeblkp;
2918	int 	ret;
2919	uint_t	size;
2920
2921	/*
2922	 * Get mapping size.
2923	 */
2924	size = TTE_CSZ(ttep);
2925	ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2926
2927	/*
2928	 * Acquire the hash bucket.
2929	 */
2930	hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2931	ASSERT(hmebp);
2932
2933	/*
2934	 * Find the hment block.
2935	 */
2936	hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2937	    rid);
2938	ASSERT(hmeblkp);
2939
2940	/*
2941	 * Add the translation.
2942	 */
2943	ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2944	    rid);
2945
2946	/*
2947	 * Release the hash bucket.
2948	 */
2949	sfmmu_tteload_release_hashbucket(hmebp);
2950
2951	return (ret);
2952}
2953
2954/*
2955 * Function locks and returns a pointer to the hash bucket for vaddr and size.
2956 */
2957static struct hmehash_bucket *
2958sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2959    uint_t rid)
2960{
2961	struct hmehash_bucket *hmebp;
2962	int hmeshift;
2963	void *htagid = sfmmutohtagid(sfmmup, rid);
2964
2965	ASSERT(htagid != NULL);
2966
2967	hmeshift = HME_HASH_SHIFT(size);
2968
2969	hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2970
2971	SFMMU_HASH_LOCK(hmebp);
2972
2973	return (hmebp);
2974}
2975
2976/*
2977 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2978 * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is
2979 * allocated.
2980 */
2981static struct hme_blk *
2982sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2983	caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2984{
2985	hmeblk_tag hblktag;
2986	int hmeshift;
2987	struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2988
2989	SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2990
2991	hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2992	ASSERT(hblktag.htag_id != NULL);
2993	hmeshift = HME_HASH_SHIFT(size);
2994	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2995	hblktag.htag_rehash = HME_HASH_REHASH(size);
2996	hblktag.htag_rid = rid;
2997
2998ttearray_realloc:
2999
3000	HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3001
3002	/*
3003	 * We block until hblk_reserve_lock is released; it's held by
3004	 * the thread temporarily using hblk_reserve until hblk_reserve is
3005	 * replaced by an hblk from sfmmu8_cache.
3006	 */
3007	if (hmeblkp == (struct hme_blk *)hblk_reserve &&
3008	    hblk_reserve_thread != curthread) {
3009		SFMMU_HASH_UNLOCK(hmebp);
3010		mutex_enter(&hblk_reserve_lock);
3011		mutex_exit(&hblk_reserve_lock);
3012		SFMMU_STAT(sf_hblk_reserve_hit);
3013		SFMMU_HASH_LOCK(hmebp);
3014		goto ttearray_realloc;
3015	}
3016
3017	if (hmeblkp == NULL) {
3018		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3019		    hblktag, flags, rid);
3020		ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3021		ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3022	} else {
3023		/*
3024		 * It is possible for 8k and 64k hblks to collide since they
3025		 * have the same rehash value. This is because we
3026		 * lazily free hblks and 8K/64K blks could be lingering.
3027		 * If we find a size mismatch we free the block and try again.
3028		 */
3029		if (get_hblk_ttesz(hmeblkp) != size) {
3030			ASSERT(!hmeblkp->hblk_vcnt);
3031			ASSERT(!hmeblkp->hblk_hmecnt);
3032			sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3033			    &list, 0);
3034			goto ttearray_realloc;
3035		}
3036		if (hmeblkp->hblk_shw_bit) {
3037			/*
3038			 * if the hblk was previously used as a shadow hblk then
3039			 * we will change it to a normal hblk
3040			 */
3041			ASSERT(!hmeblkp->hblk_shared);
3042			if (hmeblkp->hblk_shw_mask) {
3043				sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3044				ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3045				goto ttearray_realloc;
3046			} else {
3047				hmeblkp->hblk_shw_bit = 0;
3048			}
3049		}
3050		SFMMU_STAT(sf_hblk_hit);
3051	}
3052
3053	/*
3054	 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
3055	 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
3056	 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
3057	 * just add these hmeblks to the per-cpu pending queue.
3058	 */
3059	sfmmu_hblks_list_purge(&list, 1);
3060
3061	ASSERT(get_hblk_ttesz(hmeblkp) == size);
3062	ASSERT(!hmeblkp->hblk_shw_bit);
3063	ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3064	ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3065	ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3066
3067	return (hmeblkp);
3068}
3069
3070/*
3071 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3072 * otherwise.
3073 */
3074static int
3075sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3076	caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3077{
3078	page_t *pp = *pps;
3079	int hmenum, size, remap;
3080	tte_t tteold, flush_tte;
3081#ifdef DEBUG
3082	tte_t orig_old;
3083#endif /* DEBUG */
3084	struct sf_hment *sfhme;
3085	kmutex_t *pml, *pmtx;
3086	hatlock_t *hatlockp;
3087	int myflt;
3088
3089	/*
3090	 * remove this panic when we decide to let user virtual address
3091	 * space be >= USERLIMIT.
3092	 */
3093	if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
3094		panic("user addr %p in kernel space", (void *)vaddr);
3095#if defined(TTE_IS_GLOBAL)
3096	if (TTE_IS_GLOBAL(ttep))
3097		panic("sfmmu_tteload: creating global tte");
3098#endif
3099
3100#ifdef DEBUG
3101	if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
3102	    !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
3103		panic("sfmmu_tteload: non cacheable memory tte");
3104#endif /* DEBUG */
3105
3106	/* don't simulate dirty bit for writeable ISM/DISM mappings */
3107	if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
3108		TTE_SET_REF(ttep);
3109		TTE_SET_MOD(ttep);
3110	}
3111
3112	if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
3113	    !TTE_IS_MOD(ttep)) {
3114		/*
3115		 * Don't load TSB for dummy as in ISM.  Also don't preload
3116		 * the TSB if the TTE isn't writable since we're likely to
3117		 * fault on it again -- preloading can be fairly expensive.
3118		 */
3119		flags |= SFMMU_NO_TSBLOAD;
3120	}
3121
3122	size = TTE_CSZ(ttep);
3123	switch (size) {
3124	case TTE8K:
3125		SFMMU_STAT(sf_tteload8k);
3126		break;
3127	case TTE64K:
3128		SFMMU_STAT(sf_tteload64k);
3129		break;
3130	case TTE512K:
3131		SFMMU_STAT(sf_tteload512k);
3132		break;
3133	case TTE4M:
3134		SFMMU_STAT(sf_tteload4m);
3135		break;
3136	case (TTE32M):
3137		SFMMU_STAT(sf_tteload32m);
3138		ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3139		break;
3140	case (TTE256M):
3141		SFMMU_STAT(sf_tteload256m);
3142		ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3143		break;
3144	}
3145
3146	ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
3147	SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
3148	ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3149	ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3150
3151	HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3152
3153	/*
3154	 * Need to grab mlist lock here so that pageunload
3155	 * will not change tte behind us.
3156	 */
3157	if (pp) {
3158		pml = sfmmu_mlist_enter(pp);
3159	}
3160
3161	sfmmu_copytte(&sfhme->hme_tte, &tteold);
3162	/*
3163	 * Look for the corresponding hment and, if valid, verify
3164	 * that the pfns are equal.
3165	 */
3166	remap = TTE_IS_VALID(&tteold);
3167	if (remap) {
3168		pfn_t	new_pfn, old_pfn;
3169
3170		old_pfn = TTE_TO_PFN(vaddr, &tteold);
3171		new_pfn = TTE_TO_PFN(vaddr, ttep);
3172
3173		if (flags & HAT_LOAD_REMAP) {
3174			/* make sure we are remapping same type of pages */
3175			if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
3176				panic("sfmmu_tteload - tte remap io<->memory");
3177			}
3178			if (old_pfn != new_pfn &&
3179			    (pp != NULL || sfhme->hme_page != NULL)) {
3180				panic("sfmmu_tteload - tte remap pp != NULL");
3181			}
3182		} else if (old_pfn != new_pfn) {
3183			panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3184			    (void *)hmeblkp);
3185		}
3186		ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3187	}
3188
3189	if (pp) {
3190		if (size == TTE8K) {
3191#ifdef VAC
3192			/*
3193			 * Handle VAC consistency
3194			 */
3195			if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3196				sfmmu_vac_conflict(sfmmup, vaddr, pp);
3197			}
3198#endif
3199
3200			if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3201				pmtx = sfmmu_page_enter(pp);
3202				PP_CLRRO(pp);
3203				sfmmu_page_exit(pmtx);
3204			} else if (!PP_ISMAPPED(pp) &&
3205			    (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3206				pmtx = sfmmu_page_enter(pp);
3207				if (!(PP_ISMOD(pp))) {
3208					PP_SETRO(pp);
3209				}
3210				sfmmu_page_exit(pmtx);
3211			}
3212
3213		} else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3214			/*
3215			 * sfmmu_pagearray_setup failed so return
3216			 */
3217			sfmmu_mlist_exit(pml);
3218			return (1);
3219		}
3220	}
3221
3222	/*
3223	 * Make sure hment is not on a mapping list.
3224	 */
3225	ASSERT(remap || (sfhme->hme_page == NULL));
3226
3227	/* if it is not a remap then hme->next better be NULL */
3228	ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3229
3230	if (flags & HAT_LOAD_LOCK) {
3231		if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3232			panic("too high lckcnt-hmeblk %p",
3233			    (void *)hmeblkp);
3234		}
3235		atomic_add_32(&hmeblkp->hblk_lckcnt, 1);
3236
3237		HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3238	}
3239
3240#ifdef VAC
3241	if (pp && PP_ISNC(pp)) {
3242		/*
3243		 * If the physical page is marked to be uncacheable, like
3244		 * by a vac conflict, make sure the new mapping is also
3245		 * uncacheable.
3246		 */
3247		TTE_CLR_VCACHEABLE(ttep);
3248		ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3249	}
3250#endif
3251	ttep->tte_hmenum = hmenum;
3252
3253#ifdef DEBUG
3254	orig_old = tteold;
3255#endif /* DEBUG */
3256
3257	while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3258		if ((sfmmup == KHATID) &&
3259		    (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3260			sfmmu_copytte(&sfhme->hme_tte, &tteold);
3261		}
3262#ifdef DEBUG
3263		chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3264#endif /* DEBUG */
3265	}
3266	ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3267
3268	if (!TTE_IS_VALID(&tteold)) {
3269
3270		atomic_add_16(&hmeblkp->hblk_vcnt, 1);
3271		if (rid == SFMMU_INVALID_SHMERID) {
3272			atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1);
3273		} else {
3274			sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3275			sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3276			/*
3277			 * We already accounted for region ttecnt's in sfmmu
3278			 * during hat_join_region() processing. Here we
3279			 * only update the ttecnt's in the region structure.
3280			 */
3281			atomic_add_long(&rgnp->rgn_ttecnt[size], 1);
3282		}
3283	}
3284
3285	myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3286	if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3287	    sfmmup != ksfmmup) {
3288		uchar_t tteflag = 1 << size;
3289		if (rid == SFMMU_INVALID_SHMERID) {
3290			if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3291				hatlockp = sfmmu_hat_enter(sfmmup);
3292				sfmmup->sfmmu_tteflags |= tteflag;
3293				sfmmu_hat_exit(hatlockp);
3294			}
3295		} else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3296			hatlockp = sfmmu_hat_enter(sfmmup);
3297			sfmmup->sfmmu_rtteflags |= tteflag;
3298			sfmmu_hat_exit(hatlockp);
3299		}
3300		/*
3301		 * Update the current CPU tsbmiss area, so the current thread
3302		 * won't need to take the tsbmiss for the new pagesize.
3303		 * The other threads in the process will update their tsb
3304		 * miss area lazily in sfmmu_tsbmiss_exception() when they
3305		 * fail to find the translation for a newly added pagesize.
3306		 */
3307		if (size > TTE64K && myflt) {
3308			struct tsbmiss *tsbmp;
3309			kpreempt_disable();
3310			tsbmp = &tsbmiss_area[CPU->cpu_id];
3311			if (rid == SFMMU_INVALID_SHMERID) {
3312				if (!(tsbmp->uhat_tteflags & tteflag)) {
3313					tsbmp->uhat_tteflags |= tteflag;
3314				}
3315			} else {
3316				if (!(tsbmp->uhat_rtteflags & tteflag)) {
3317					tsbmp->uhat_rtteflags |= tteflag;
3318				}
3319			}
3320			kpreempt_enable();
3321		}
3322	}
3323
3324	if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3325	    !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3326		hatlockp = sfmmu_hat_enter(sfmmup);
3327		SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3328		sfmmu_hat_exit(hatlockp);
3329	}
3330
3331	flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3332	    hw_tte.tte_intlo;
3333	flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3334	    hw_tte.tte_inthi;
3335
3336	if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3337		/*
3338		 * If remap and new tte differs from old tte we need
3339		 * to sync the mod bit and flush TLB/TSB.  We don't
3340		 * need to sync ref bit because we currently always set
3341		 * ref bit in tteload.
3342		 */
3343		ASSERT(TTE_IS_REF(ttep));
3344		if (TTE_IS_MOD(&tteold)) {
3345			sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3346		}
3347		/*
3348		 * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3349		 * hmes are only used for read-only text.  This code is added for
3350		 * completeness and for future use of shared hmeblks with writable
3351		 * mappings of VMODSORT vnodes.
3352		 */
3353		if (hmeblkp->hblk_shared) {
3354			cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3355			    sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3356			xt_sync(cpuset);
3357			SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3358		} else {
3359			sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3360			xt_sync(sfmmup->sfmmu_cpusran);
3361		}
3362	}
3363
3364	if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3365		/*
3366		 * We only preload 8K and 4M mappings into the TSB, since
3367		 * 64K and 512K mappings are replicated and hence don't
3368		 * have a single, unique TSB entry. Ditto for 32M/256M.
3369		 */
3370		if (size == TTE8K || size == TTE4M) {
3371			sf_scd_t *scdp;
3372			hatlockp = sfmmu_hat_enter(sfmmup);
3373			/*
3374			 * Don't preload private TSB if the mapping is used
3375			 * by the shctx in the SCD.
3376			 */
3377			scdp = sfmmup->sfmmu_scdp;
3378			if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3379			    !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3380				sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3381				    size);
3382			}
3383			sfmmu_hat_exit(hatlockp);
3384		}
3385	}
3386	if (pp) {
3387		if (!remap) {
3388			HME_ADD(sfhme, pp);
3389			atomic_add_16(&hmeblkp->hblk_hmecnt, 1);
3390			ASSERT(hmeblkp->hblk_hmecnt > 0);
3391
3392			/*
3393			 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3394			 * see pageunload() for comment.
3395			 */
3396		}
3397		sfmmu_mlist_exit(pml);
3398	}
3399
3400	return (0);
3401}
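
/*
 * A note on the sfmmu_modifytte_try() loop above: it is the usual
 * compare-and-swap retry pattern, in outline (illustrative):
 *
 *	do {
 *		observed = *target;	  (re)read the current value
 *		new = compute(observed);
 *	} while (cas(target, observed, new) != observed);
 *
 * A negative return from sfmmu_modifytte_try() means the underlying tte
 * changed beneath us; tteold is refreshed and the loop retries against
 * the updated view.
 */
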
3402/*
3403 * Function unlocks hash bucket.
3404 */
3405static void
3406sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3407{
3408	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3409	SFMMU_HASH_UNLOCK(hmebp);
3410}
3411
3412/*
3413 * Function which checks and sets up the page array for a large
3414 * translation.  Will set the p_vcolor, p_index, p_ro fields.
3415 * Assumes addr and the pfnum of the first page are properly aligned.
3416 * Will check for physical contiguity.  If the check fails it
3417 * returns nonzero.
3418 */
3419static int
3420sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3421{
	int	i, index, ttesz;
3423	pfn_t	pfnum;
3424	pgcnt_t	npgs;
3425	page_t *pp, *pp1;
3426	kmutex_t *pmtx;
3427#ifdef VAC
3428	int osz;
3429	int cflags = 0;
3430	int vac_err = 0;
3431#endif
3432	int newidx = 0;
3433
3434	ttesz = TTE_CSZ(ttep);
3435
3436	ASSERT(ttesz > TTE8K);
3437
3438	npgs = TTEPAGES(ttesz);
3439	index = PAGESZ_TO_INDEX(ttesz);
3440
3441	pfnum = (*pps)->p_pagenum;
3442	ASSERT(IS_P2ALIGNED(pfnum, npgs));
3443
3444	/*
3445	 * Save the first pp so we can do HAT_TMPNC at the end.
3446	 */
3447	pp1 = *pps;
3448#ifdef VAC
3449	osz = fnd_mapping_sz(pp1);
3450#endif
3451
3452	for (i = 0; i < npgs; i++, pps++) {
3453		pp = *pps;
3454		ASSERT(PAGE_LOCKED(pp));
3455		ASSERT(pp->p_szc >= ttesz);
3456		ASSERT(pp->p_szc == pp1->p_szc);
3457		ASSERT(sfmmu_mlist_held(pp));
3458
3459		/*
3460		 * XXX is it possible to maintain P_RO on the root only?
3461		 */
3462		if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3463			pmtx = sfmmu_page_enter(pp);
3464			PP_CLRRO(pp);
3465			sfmmu_page_exit(pmtx);
3466		} else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3467		    !PP_ISMOD(pp)) {
3468			pmtx = sfmmu_page_enter(pp);
3469			if (!(PP_ISMOD(pp))) {
3470				PP_SETRO(pp);
3471			}
3472			sfmmu_page_exit(pmtx);
3473		}
3474
3475		/*
3476		 * If this is a remap we skip vac & contiguity checks.
3477		 */
3478		if (remap)
3479			continue;
3480
3481		/*
3482		 * set p_vcolor and detect any vac conflicts.
3483		 */
3484#ifdef VAC
3485		if (vac_err == 0) {
3486			vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3487
3488		}
3489#endif
3490
3491		/*
3492		 * Save current index in case we need to undo it.
		 * Note: "PAGESZ_TO_INDEX(sz)	(1 << (sz))"
		 *	 "SFMMU_INDEX_SHIFT	6"
		 *	 "SFMMU_INDEX_MASK	((1 << SFMMU_INDEX_SHIFT) - 1)"
		 *	 "PP_MAPINDEX(p_index)	(p_index & SFMMU_INDEX_MASK)"
3497		 *
3498		 * So:	index = PAGESZ_TO_INDEX(ttesz);
3499		 *	if ttesz == 1 then index = 0x2
3500		 *		    2 then index = 0x4
3501		 *		    3 then index = 0x8
3502		 *		    4 then index = 0x10
3503		 *		    5 then index = 0x20
		 * The code below checks if it's a new pagesize (i.e., newidx)
		 * in case we need to take it back out of p_index,
		 * and then ORs the new index into the existing index.
3507		 */
3508		if ((PP_MAPINDEX(pp) & index) == 0)
3509			newidx = 1;
3510		pp->p_index = (PP_MAPINDEX(pp) | index);
3511
3512		/*
3513		 * contiguity check
3514		 */
3515		if (pp->p_pagenum != pfnum) {
3516			/*
3517			 * If we fail the contiguity test then
3518			 * the only thing we need to fix is the p_index field.
3519			 * We might get a few extra flushes but since this
3520			 * path is rare that is ok.  The p_ro field will
3521			 * get automatically fixed on the next tteload to
3522			 * the page.  NO TNC bit is set yet.
3523			 */
3524			while (i >= 0) {
3525				pp = *pps;
3526				if (newidx)
3527					pp->p_index = (PP_MAPINDEX(pp) &
3528					    ~index);
3529				pps--;
3530				i--;
3531			}
3532			return (1);
3533		}
3534		pfnum++;
3535		addr += MMU_PAGESIZE;
3536	}
3537
3538#ifdef VAC
3539	if (vac_err) {
3540		if (ttesz > osz) {
3541			/*
			 * There are some smaller mappings that cause vac
			 * conflicts.  Convert all existing small mappings to
			 * TNC.
3545			 */
3546			SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3547			sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3548			    npgs);
3549		} else {
3550			/* EMPTY */
3551			/*
			 * If there exists a big page mapping,
			 * that means the whole existing big page
			 * has the TNC setting already.  No need to convert
			 * to TNC again.
3556			 */
3557			ASSERT(PP_ISTNC(pp1));
3558		}
3559	}
3560#endif	/* VAC */
3561
3562	return (0);
3563}
3564
3565#ifdef VAC
3566/*
 * Routine that checks vac consistency for a large page.  It also
 * sets the virtual color for all pp's of this big mapping.
3569 */
3570static int
3571sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3572{
3573	int vcolor, ocolor;
3574
3575	ASSERT(sfmmu_mlist_held(pp));
3576
3577	if (PP_ISNC(pp)) {
3578		return (HAT_TMPNC);
3579	}
3580
3581	vcolor = addr_to_vcolor(addr);
3582	if (PP_NEWPAGE(pp)) {
3583		PP_SET_VCOLOR(pp, vcolor);
3584		return (0);
3585	}
3586
3587	ocolor = PP_GET_VCOLOR(pp);
3588	if (ocolor == vcolor) {
3589		return (0);
3590	}
3591
3592	if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3593		/*
		 * Previous user of page had a different color
3595		 * but since there are no current users
3596		 * we just flush the cache and change the color.
3597		 * As an optimization for large pages we flush the
3598		 * entire cache of that color and set a flag.
3599		 */
3600		SFMMU_STAT(sf_pgcolor_conflict);
3601		if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3602			CacheColor_SetFlushed(*cflags, ocolor);
3603			sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3604		}
3605		PP_SET_VCOLOR(pp, vcolor);
3606		return (0);
3607	}
3608
3609	/*
	 * We got a real conflict with a current mapping.
	 * Set flags to start uncaching all mappings
	 * and return failure so we restart looping
	 * through the pp array from the beginning.
3614	 */
3615	return (HAT_TMPNC);
3616}
3617#endif	/* VAC */
3618
3619/*
 * Creates a large page shadow hmeblk for a tte.
3621 * The purpose of this routine is to allow us to do quick unloads because
3622 * the vm layer can easily pass a very large but sparsely populated range.
3623 */
3624static struct hme_blk *
3625sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3626{
3627	struct hmehash_bucket *hmebp;
3628	hmeblk_tag hblktag;
3629	int hmeshift, size, vshift;
3630	uint_t shw_mask, newshw_mask;
3631	struct hme_blk *hmeblkp;
3632
3633	ASSERT(sfmmup != KHATID);
3634	if (mmu_page_sizes == max_mmu_page_sizes) {
3635		ASSERT(ttesz < TTE256M);
3636	} else {
3637		ASSERT(ttesz < TTE4M);
3638		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3639		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3640	}
3641
3642	if (ttesz == TTE8K) {
3643		size = TTE512K;
3644	} else {
3645		size = ++ttesz;
3646	}
3647
3648	hblktag.htag_id = sfmmup;
3649	hmeshift = HME_HASH_SHIFT(size);
3650	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3651	hblktag.htag_rehash = HME_HASH_REHASH(size);
3652	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3653	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3654
3655	SFMMU_HASH_LOCK(hmebp);
3656
3657	HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3658	ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3659	if (hmeblkp == NULL) {
3660		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3661		    hblktag, flags, SFMMU_INVALID_SHMERID);
3662	}
3663	ASSERT(hmeblkp);
3664	if (!hmeblkp->hblk_shw_mask) {
3665		/*
		 * If this is an unused hblk it was either just allocated or
		 * could potentially be a previous large page hblk, so we
		 * need to set the shadow bit.
3669		 */
3670		ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3671		hmeblkp->hblk_shw_bit = 1;
3672	} else if (hmeblkp->hblk_shw_bit == 0) {
3673		panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3674		    (void *)hmeblkp);
3675	}
3676	ASSERT(hmeblkp->hblk_shw_bit == 1);
3677	ASSERT(!hmeblkp->hblk_shared);
3678	vshift = vaddr_to_vshift(hblktag, vaddr, size);
3679	ASSERT(vshift < 8);
3680	/*
3681	 * Atomically set shw mask bit
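	 * using cas32().  cas32() returns the previous value of
	 * hblk_shw_mask, so the loop below retries until the value it
	 * returns matches the one the update was computed from, i.e.
	 * until the swap has actually taken effect.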
3682	 */
3683	do {
3684		shw_mask = hmeblkp->hblk_shw_mask;
3685		newshw_mask = shw_mask | (1 << vshift);
3686		newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask,
3687		    newshw_mask);
3688	} while (newshw_mask != shw_mask);
3689
3690	SFMMU_HASH_UNLOCK(hmebp);
3691
3692	return (hmeblkp);
3693}
3694
3695/*
 * This routine cleans up a previous shadow hmeblk and changes it to
 * a regular hblk.  This happens rarely but it is possible
 * when a process wants to use large pages and there are hblks still
 * lying around from the previous as (address space) that used these
 * hmeblks.  The alternative was to clean up the shadow hblks at unload
 * time but since so few user processes actually use large pages, it is
 * better to be lazy and clean up at this time.
3703 */
3704static void
3705sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3706	struct hmehash_bucket *hmebp)
3707{
3708	caddr_t addr, endaddr;
3709	int hashno, size;
3710
3711	ASSERT(hmeblkp->hblk_shw_bit);
3712	ASSERT(!hmeblkp->hblk_shared);
3713
3714	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3715
3716	if (!hmeblkp->hblk_shw_mask) {
3717		hmeblkp->hblk_shw_bit = 0;
3718		return;
3719	}
3720	addr = (caddr_t)get_hblk_base(hmeblkp);
3721	endaddr = get_hblk_endaddr(hmeblkp);
3722	size = get_hblk_ttesz(hmeblkp);
3723	hashno = size - 1;
3724	ASSERT(hashno > 0);
3725	SFMMU_HASH_UNLOCK(hmebp);
3726
3727	sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3728
3729	SFMMU_HASH_LOCK(hmebp);
3730}
3731
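/*
 * Walks [addr, endaddr) at the given rehash level and frees any hmeblks
 * that no longer hold valid or locked mappings.  If a nested shadow
 * hmeblk is encountered, its children are cleaned up first and the same
 * address is retried.
 */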
3732static void
3733sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3734	int hashno)
3735{
3736	int hmeshift, shadow = 0;
3737	hmeblk_tag hblktag;
3738	struct hmehash_bucket *hmebp;
3739	struct hme_blk *hmeblkp;
3740	struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3741
3742	ASSERT(hashno > 0);
3743	hblktag.htag_id = sfmmup;
3744	hblktag.htag_rehash = hashno;
3745	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3746
3747	hmeshift = HME_HASH_SHIFT(hashno);
3748
3749	while (addr < endaddr) {
3750		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3751		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3752		SFMMU_HASH_LOCK(hmebp);
3753		/* inline HME_HASH_SEARCH */
3754		hmeblkp = hmebp->hmeblkp;
3755		pr_hblk = NULL;
3756		while (hmeblkp) {
3757			if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3758				/* found hme_blk */
3759				ASSERT(!hmeblkp->hblk_shared);
3760				if (hmeblkp->hblk_shw_bit) {
3761					if (hmeblkp->hblk_shw_mask) {
3762						shadow = 1;
3763						sfmmu_shadow_hcleanup(sfmmup,
3764						    hmeblkp, hmebp);
3765						break;
3766					} else {
3767						hmeblkp->hblk_shw_bit = 0;
3768					}
3769				}
3770
3771				/*
				 * Hblk_hmecnt and hblk_vcnt could be nonzero
				 * since hblk_unload() does not guarantee that.
3774				 *
3775				 * XXX - this could cause tteload() to spin
3776				 * where sfmmu_shadow_hcleanup() is called.
3777				 */
3778			}
3779
3780			nx_hblk = hmeblkp->hblk_next;
3781			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3782				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3783				    &list, 0);
3784			} else {
3785				pr_hblk = hmeblkp;
3786			}
3787			hmeblkp = nx_hblk;
3788		}
3789
3790		SFMMU_HASH_UNLOCK(hmebp);
3791
3792		if (shadow) {
3793			/*
			 * We found another shadow hblk and cleaned up its
			 * children.  We need to go back and clean up
			 * the original hblk, so we don't advance the
			 * addr.
3798			 */
3799			shadow = 0;
3800		} else {
3801			addr = (caddr_t)roundup((uintptr_t)addr + 1,
3802			    (1 << hmeshift));
3803		}
3804	}
3805	sfmmu_hblks_list_purge(&list, 0);
3806}
3807
3808/*
 * This routine's job is to delete stale invalid shared hmeregion hmeblks
 * that may still linger on after pageunload.
3811 */
3812static void
3813sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3814{
3815	int hmeshift;
3816	hmeblk_tag hblktag;
3817	struct hmehash_bucket *hmebp;
3818	struct hme_blk *hmeblkp;
3819	struct hme_blk *pr_hblk;
3820	struct hme_blk *list = NULL;
3821
3822	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3823	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3824
3825	hmeshift = HME_HASH_SHIFT(ttesz);
3826	hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3827	hblktag.htag_rehash = ttesz;
3828	hblktag.htag_rid = rid;
3829	hblktag.htag_id = srdp;
3830	hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3831
3832	SFMMU_HASH_LOCK(hmebp);
3833	HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3834	if (hmeblkp != NULL) {
3835		ASSERT(hmeblkp->hblk_shared);
3836		ASSERT(!hmeblkp->hblk_shw_bit);
3837		if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3838			panic("sfmmu_cleanup_rhblk: valid hmeblk");
3839		}
3840		ASSERT(!hmeblkp->hblk_lckcnt);
3841		sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3842		    &list, 0);
3843	}
3844	SFMMU_HASH_UNLOCK(hmebp);
3845	sfmmu_hblks_list_purge(&list, 0);
3846}
3847
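/*
 * No-op region callback, used by sfmmu_unload_hmeregion() when the region
 * was created without a callback function.
 */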
3848/* ARGSUSED */
3849static void
3850sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3851    size_t r_size, void *r_obj, u_offset_t r_objoff)
3852{
3853}
3854
3855/*
3856 * Searches for an hmeblk which maps addr, then unloads this mapping
3857 * and updates *eaddrp, if the hmeblk is found.
3858 */
3859static void
3860sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3861    caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3862{
3863	int hmeshift;
3864	hmeblk_tag hblktag;
3865	struct hmehash_bucket *hmebp;
3866	struct hme_blk *hmeblkp;
3867	struct hme_blk *pr_hblk;
3868	struct hme_blk *list = NULL;
3869
3870	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3871	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3872	ASSERT(ttesz >= HBLK_MIN_TTESZ);
3873
3874	hmeshift = HME_HASH_SHIFT(ttesz);
3875	hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3876	hblktag.htag_rehash = ttesz;
3877	hblktag.htag_rid = rid;
3878	hblktag.htag_id = srdp;
3879	hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3880
3881	SFMMU_HASH_LOCK(hmebp);
3882	HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3883	if (hmeblkp != NULL) {
3884		ASSERT(hmeblkp->hblk_shared);
3885		ASSERT(!hmeblkp->hblk_lckcnt);
3886		if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3887			*eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3888			    eaddr, NULL, HAT_UNLOAD);
3889			ASSERT(*eaddrp > addr);
3890		}
3891		ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3892		sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3893		    &list, 0);
3894	}
3895	SFMMU_HASH_UNLOCK(hmebp);
3896	sfmmu_hblks_list_purge(&list, 0);
3897}
3898
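/*
 * Unloads all translations for the given hme region.  For each page size
 * present in the region (per rgn_hmeflags) the range is walked, and each
 * contiguous chunk of unloaded address space is reported to the region's
 * callback function (or to a no-op if none was registered).
 */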
3899static void
3900sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3901{
3902	int ttesz = rgnp->rgn_pgszc;
3903	size_t rsz = rgnp->rgn_size;
3904	caddr_t rsaddr = rgnp->rgn_saddr;
3905	caddr_t readdr = rsaddr + rsz;
3906	caddr_t rhsaddr;
3907	caddr_t va;
3908	uint_t rid = rgnp->rgn_id;
3909	caddr_t cbsaddr;
3910	caddr_t cbeaddr;
3911	hat_rgn_cb_func_t rcbfunc;
3912	ulong_t cnt;
3913
3914	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3915	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3916
3917	ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3918	ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3919	if (ttesz < HBLK_MIN_TTESZ) {
3920		ttesz = HBLK_MIN_TTESZ;
3921		rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3922	} else {
3923		rhsaddr = rsaddr;
3924	}
3925
3926	if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3927		rcbfunc = sfmmu_rgn_cb_noop;
3928	}
3929
3930	while (ttesz >= HBLK_MIN_TTESZ) {
3931		cbsaddr = rsaddr;
3932		cbeaddr = rsaddr;
3933		if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3934			ttesz--;
3935			continue;
3936		}
3937		cnt = 0;
3938		va = rsaddr;
3939		while (va < readdr) {
3940			ASSERT(va >= rhsaddr);
3941			if (va != cbeaddr) {
3942				if (cbeaddr != cbsaddr) {
3943					ASSERT(cbeaddr > cbsaddr);
3944					(*rcbfunc)(cbsaddr, cbeaddr,
3945					    rsaddr, rsz, rgnp->rgn_obj,
3946					    rgnp->rgn_objoff);
3947				}
3948				cbsaddr = va;
3949				cbeaddr = va;
3950			}
3951			sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3952			    ttesz, &cbeaddr);
3953			cnt++;
3954			va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3955		}
3956		if (cbeaddr != cbsaddr) {
3957			ASSERT(cbeaddr > cbsaddr);
3958			(*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3959			    rsz, rgnp->rgn_obj,
3960			    rgnp->rgn_objoff);
3961		}
3962		ttesz--;
3963	}
3964}
3965
3966/*
3967 * Release one hardware address translation lock on the given address range.
3968 */
3969void
3970hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3971{
3972	struct hmehash_bucket *hmebp;
3973	hmeblk_tag hblktag;
3974	int hmeshift, hashno = 1;
3975	struct hme_blk *hmeblkp, *list = NULL;
3976	caddr_t endaddr;
3977
3978	ASSERT(sfmmup != NULL);
3979	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3980
3981	ASSERT((sfmmup == ksfmmup) ||
3982	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3983	ASSERT((len & MMU_PAGEOFFSET) == 0);
3984	endaddr = addr + len;
3985	hblktag.htag_id = sfmmup;
3986	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3987
3988	/*
3989	 * Spitfire supports 4 page sizes.
3990	 * Most pages are expected to be of the smallest page size (8K) and
3991	 * these will not need to be rehashed. 64K pages also don't need to be
3992	 * rehashed because an hmeblk spans 64K of address space. 512K pages
 * might need 1 rehash and 4M pages might need 2 rehashes.
3994	 */
3995	while (addr < endaddr) {
3996		hmeshift = HME_HASH_SHIFT(hashno);
3997		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3998		hblktag.htag_rehash = hashno;
3999		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4000
4001		SFMMU_HASH_LOCK(hmebp);
4002
4003		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4004		if (hmeblkp != NULL) {
4005			ASSERT(!hmeblkp->hblk_shared);
4006			/*
4007			 * If we encounter a shadow hmeblk then
4008			 * we know there are no valid hmeblks mapping
4009			 * this address at this size or larger.
4010			 * Just increment address by the smallest
4011			 * page size.
4012			 */
4013			if (hmeblkp->hblk_shw_bit) {
4014				addr += MMU_PAGESIZE;
4015			} else {
4016				addr = sfmmu_hblk_unlock(hmeblkp, addr,
4017				    endaddr);
4018			}
4019			SFMMU_HASH_UNLOCK(hmebp);
4020			hashno = 1;
4021			continue;
4022		}
4023		SFMMU_HASH_UNLOCK(hmebp);
4024
4025		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4026			/*
4027			 * We have traversed the whole list and rehashed
			 * if necessary without finding the address to unlock,
			 * which should never happen.
4030			 */
4031			panic("sfmmu_unlock: addr not found. "
4032			    "addr %p hat %p", (void *)addr, (void *)sfmmup);
4033		} else {
4034			hashno++;
4035		}
4036	}
4037
4038	sfmmu_hblks_list_purge(&list, 0);
4039}
4040
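/*
 * Region-aware variant of hat_unlock().  For each address the candidate
 * page size classes are searched from largest to smallest until the
 * hmeblk holding the locked translation is found and unlocked.
 */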
4041void
4042hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4043    hat_region_cookie_t rcookie)
4044{
4045	sf_srd_t *srdp;
4046	sf_region_t *rgnp;
4047	int ttesz;
4048	uint_t rid;
4049	caddr_t eaddr;
4050	caddr_t va;
4051	int hmeshift;
4052	hmeblk_tag hblktag;
4053	struct hmehash_bucket *hmebp;
4054	struct hme_blk *hmeblkp;
4055	struct hme_blk *pr_hblk;
4056	struct hme_blk *list;
4057
4058	if (rcookie == HAT_INVALID_REGION_COOKIE) {
4059		hat_unlock(sfmmup, addr, len);
4060		return;
4061	}
4062
4063	ASSERT(sfmmup != NULL);
4064	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4065	ASSERT(sfmmup != ksfmmup);
4066
4067	srdp = sfmmup->sfmmu_srdp;
4068	rid = (uint_t)((uint64_t)rcookie);
4069	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
4070	eaddr = addr + len;
4071	va = addr;
4072	list = NULL;
4073	rgnp = srdp->srd_hmergnp[rid];
4074	SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
4075
4076	ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
4077	ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
4078	if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
4079		ttesz = HBLK_MIN_TTESZ;
4080	} else {
4081		ttesz = rgnp->rgn_pgszc;
4082	}
4083	while (va < eaddr) {
4084		while (ttesz < rgnp->rgn_pgszc &&
4085		    IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
4086			ttesz++;
4087		}
4088		while (ttesz >= HBLK_MIN_TTESZ) {
4089			if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
4090				ttesz--;
4091				continue;
4092			}
4093			hmeshift = HME_HASH_SHIFT(ttesz);
4094			hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
4095			hblktag.htag_rehash = ttesz;
4096			hblktag.htag_rid = rid;
4097			hblktag.htag_id = srdp;
4098			hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
4099			SFMMU_HASH_LOCK(hmebp);
4100			HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4101			    &list);
4102			if (hmeblkp == NULL) {
4103				SFMMU_HASH_UNLOCK(hmebp);
4104				ttesz--;
4105				continue;
4106			}
4107			ASSERT(hmeblkp->hblk_shared);
4108			va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4109			ASSERT(va >= eaddr ||
4110			    IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
4111			SFMMU_HASH_UNLOCK(hmebp);
4112			break;
4113		}
4114		if (ttesz < HBLK_MIN_TTESZ) {
4115			panic("hat_unlock_region: addr not found "
4116			    "addr %p hat %p", (void *)va, (void *)sfmmup);
4117		}
4118	}
4119	sfmmu_hblks_list_purge(&list, 0);
4120}
4121
4122/*
4123 * Function to unlock a range of addresses in an hmeblk.  It returns the
4124 * next address that needs to be unlocked.
4125 * Should be called with the hash lock held.
4126 */
4127static caddr_t
4128sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4129{
4130	struct sf_hment *sfhme;
4131	tte_t tteold, ttemod;
4132	int ttesz, ret;
4133
4134	ASSERT(in_hblk_range(hmeblkp, addr));
4135	ASSERT(hmeblkp->hblk_shw_bit == 0);
4136
4137	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4138	ttesz = get_hblk_ttesz(hmeblkp);
4139
4140	HBLKTOHME(sfhme, hmeblkp, addr);
4141	while (addr < endaddr) {
4142readtte:
4143		sfmmu_copytte(&sfhme->hme_tte, &tteold);
4144		if (TTE_IS_VALID(&tteold)) {
4145
4146			ttemod = tteold;
4147
4148			ret = sfmmu_modifytte_try(&tteold, &ttemod,
4149			    &sfhme->hme_tte);
4150
4151			if (ret < 0)
4152				goto readtte;
4153
4154			if (hmeblkp->hblk_lckcnt == 0)
4155				panic("zero hblk lckcnt");
4156
4157			if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4158			    (uintptr_t)endaddr)
4159				panic("can't unlock large tte");
4160
4161			ASSERT(hmeblkp->hblk_lckcnt > 0);
4162			atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
4163			HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4164		} else {
4165			panic("sfmmu_hblk_unlock: invalid tte");
4166		}
4167		addr += TTEBYTES(ttesz);
4168		sfhme++;
4169	}
4170	return (addr);
4171}
4172
4173/*
4174 * Physical Address Mapping Framework
4175 *
4176 * General rules:
4177 *
4178 * (1) Applies only to seg_kmem memory pages. To make things easier,
4179 *     seg_kpm addresses are also accepted by the routines, but nothing
4180 *     is done with them since by definition their PA mappings are static.
4181 * (2) hat_add_callback() may only be called while holding the page lock
4182 *     SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
4183 *     or passing HAC_PAGELOCK flag.
4184 * (3) prehandler() and posthandler() may not call hat_add_callback() or
4185 *     hat_delete_callback(), nor should they allocate memory. Post quiesce
4186 *     callbacks may not sleep or acquire adaptive mutex locks.
4187 * (4) Either prehandler() or posthandler() (but not both) may be specified
4188 *     as being NULL.  Specifying an errhandler() is optional.
4189 *
4190 * Details of using the framework:
4191 *
4192 * registering a callback (hat_register_callback())
4193 *
4194 *	Pass prehandler, posthandler, errhandler addresses
4195 *	as described below. If capture_cpus argument is nonzero,
4196 *	suspend callback to the prehandler will occur with CPUs
4197 *	captured and executing xc_loop() and CPUs will remain
4198 *	captured until after the posthandler suspend callback
4199 *	occurs.
4200 *
4201 * adding a callback (hat_add_callback())
4202 *
4203 *      as_pagelock();
4204 *	hat_add_callback();
4205 *      save returned pfn in private data structures or program registers;
4206 *      as_pageunlock();
4207 *
4208 * prehandler()
4209 *
4210 *	Stop all accesses by physical address to this memory page.
4211 *	Called twice: the first, PRESUSPEND, is a context safe to acquire
4212 *	adaptive locks. The second, SUSPEND, is called at high PIL with
4213 *	CPUs captured so adaptive locks may NOT be acquired (and all spin
4214 *	locks must be XCALL_PIL or higher locks).
4215 *
4216 *	May return the following errors:
4217 *		EIO:	A fatal error has occurred. This will result in panic.
4218 *		EAGAIN:	The page cannot be suspended. This will fail the
4219 *			relocation.
4220 *		0:	Success.
4221 *
4222 * posthandler()
4223 *
4224 *      Save new pfn in private data structures or program registers;
4225 *	not allowed to fail (non-zero return values will result in panic).
4226 *
4227 * errhandler()
4228 *
 *	Called when an error occurs related to the callback.  Currently
4230 *	the only such error is HAT_CB_ERR_LEAKED which indicates that
4231 *	a page is being freed, but there are still outstanding callback(s)
4232 *	registered on the page.
4233 *
4234 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4235 *
4236 *	stop using physical address
4237 *	hat_delete_callback();
4238 *
4239 */
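
/*
 * A minimal usage sketch of the framework (the mymod_* names, key value
 * and page-sized length below are illustrative only, not part of any
 * real module):
 *
 *	static id_t mymod_cbid;
 *	static pfn_t mymod_pfn;
 *	static void *mymod_cookie;
 *
 *	mymod_cbid = hat_register_callback(0x6d796d6f, mymod_prehandler,
 *	    mymod_posthandler, mymod_errhandler, 0);
 *
 *	(void) hat_add_callback(mymod_cbid, vaddr, MMU_PAGESIZE,
 *	    HAC_PAGELOCK | HAC_SLEEP, mymod_pvt, &mymod_pfn, &mymod_cookie);
 *	...
 *	hat_delete_callback(vaddr, MMU_PAGESIZE, mymod_pvt, HAC_PAGELOCK,
 *	    mymod_cookie);
 */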
4240
4241/*
4242 * Register a callback class.  Each subsystem should do this once and
4243 * cache the id_t returned for use in setting up and tearing down callbacks.
4244 *
4245 * There is no facility for removing callback IDs once they are created;
4246 * the "key" should be unique for each module, so in case a module is unloaded
4247 * and subsequently re-loaded, we can recycle the module's previous entry.
4248 */
4249id_t
4250hat_register_callback(int key,
4251	int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4252	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4253	int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4254	int capture_cpus)
4255{
4256	id_t id;
4257
4258	/*
4259	 * Search the table for a pre-existing callback associated with
4260	 * the identifier "key".  If one exists, we re-use that entry in
4261	 * the table for this instance, otherwise we assign the next
4262	 * available table slot.
4263	 */
4264	for (id = 0; id < sfmmu_max_cb_id; id++) {
4265		if (sfmmu_cb_table[id].key == key)
4266			break;
4267	}
4268
4269	if (id == sfmmu_max_cb_id) {
4270		id = sfmmu_cb_nextid++;
4271		if (id >= sfmmu_max_cb_id)
4272			panic("hat_register_callback: out of callback IDs");
4273	}
4274
4275	ASSERT(prehandler != NULL || posthandler != NULL);
4276
4277	sfmmu_cb_table[id].key = key;
4278	sfmmu_cb_table[id].prehandler = prehandler;
4279	sfmmu_cb_table[id].posthandler = posthandler;
4280	sfmmu_cb_table[id].errhandler = errhandler;
4281	sfmmu_cb_table[id].capture_cpus = capture_cpus;
4282
4283	return (id);
4284}
4285
4286#define	HAC_COOKIE_NONE	(void *)-1
4287
4288/*
4289 * Add relocation callbacks to the specified addr/len which will be called
4290 * when relocating the associated page. See the description of pre and
4291 * posthandler above for more details.
4292 *
4293 * If HAC_PAGELOCK is included in flags, the underlying memory page is
4294 * locked internally so the caller must be able to deal with the callback
4295 * running even before this function has returned.  If HAC_PAGELOCK is not
4296 * set, it is assumed that the underlying memory pages are locked.
4297 *
4298 * Since the caller must track the individual page boundaries anyway,
4299 * we only allow a callback to be added to a single page (large
4300 * or small).  Thus [addr, addr + len) MUST be contained within a single
4301 * page.
4302 *
4303 * Registering multiple callbacks on the same [addr, addr+len) is supported,
4304 * _provided_that_ a unique parameter is specified for each callback.
4305 * If multiple callbacks are registered on the same range the callback will
4306 * be invoked with each unique parameter. Registering the same callback with
4307 * the same argument more than once will result in corrupted kernel state.
4308 *
4309 * Returns the pfn of the underlying kernel page in *rpfn
4310 * on success, or PFN_INVALID on failure.
4311 *
4312 * cookiep (if passed) provides storage space for an opaque cookie
4313 * to return later to hat_delete_callback(). This cookie makes the callback
4314 * deletion significantly quicker by avoiding a potentially lengthy hash
4315 * search.
4316 *
4317 * Returns values:
4318 *    0:      success
4319 *    ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4320 *    EINVAL: callback ID is not valid
4321 *    ENXIO:  ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4322 *            space
4323 *    ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4324 */
4325int
4326hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4327	void *pvt, pfn_t *rpfn, void **cookiep)
4328{
4329	struct 		hmehash_bucket *hmebp;
4330	hmeblk_tag 	hblktag;
4331	struct hme_blk	*hmeblkp;
4332	int 		hmeshift, hashno;
4333	caddr_t 	saddr, eaddr, baseaddr;
4334	struct pa_hment *pahmep;
4335	struct sf_hment *sfhmep, *osfhmep;
4336	kmutex_t	*pml;
4337	tte_t   	tte;
4338	page_t		*pp;
4339	vnode_t		*vp;
4340	u_offset_t	off;
4341	pfn_t		pfn;
4342	int		kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4343	int		locked = 0;
4344
4345	/*
	 * For KPM mappings, just return the pfn of the physical address
	 * since we don't need to register any callbacks.
4348	 */
4349	if (IS_KPM_ADDR(vaddr)) {
4350		uint64_t paddr;
4351		SFMMU_KPM_VTOP(vaddr, paddr);
4352		*rpfn = btop(paddr);
4353		if (cookiep != NULL)
4354			*cookiep = HAC_COOKIE_NONE;
4355		return (0);
4356	}
4357
4358	if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4359		*rpfn = PFN_INVALID;
4360		return (EINVAL);
4361	}
4362
4363	if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4364		*rpfn = PFN_INVALID;
4365		return (ENOMEM);
4366	}
4367
4368	sfhmep = &pahmep->sfment;
4369
4370	saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4371	eaddr = saddr + len;
4372
4373rehash:
4374	/* Find the mapping(s) for this page */
4375	for (hashno = TTE64K, hmeblkp = NULL;
4376	    hmeblkp == NULL && hashno <= mmu_hashcnt;
4377	    hashno++) {
4378		hmeshift = HME_HASH_SHIFT(hashno);
4379		hblktag.htag_id = ksfmmup;
4380		hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4381		hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4382		hblktag.htag_rehash = hashno;
4383		hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4384
4385		SFMMU_HASH_LOCK(hmebp);
4386
4387		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4388
4389		if (hmeblkp == NULL)
4390			SFMMU_HASH_UNLOCK(hmebp);
4391	}
4392
4393	if (hmeblkp == NULL) {
4394		kmem_cache_free(pa_hment_cache, pahmep);
4395		*rpfn = PFN_INVALID;
4396		return (ENXIO);
4397	}
4398
4399	ASSERT(!hmeblkp->hblk_shared);
4400
4401	HBLKTOHME(osfhmep, hmeblkp, saddr);
4402	sfmmu_copytte(&osfhmep->hme_tte, &tte);
4403
4404	if (!TTE_IS_VALID(&tte)) {
4405		SFMMU_HASH_UNLOCK(hmebp);
4406		kmem_cache_free(pa_hment_cache, pahmep);
4407		*rpfn = PFN_INVALID;
4408		return (ENXIO);
4409	}
4410
4411	/*
4412	 * Make sure the boundaries for the callback fall within this
4413	 * single mapping.
4414	 */
4415	baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4416	ASSERT(saddr >= baseaddr);
4417	if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4418		SFMMU_HASH_UNLOCK(hmebp);
4419		kmem_cache_free(pa_hment_cache, pahmep);
4420		*rpfn = PFN_INVALID;
4421		return (ERANGE);
4422	}
4423
4424	pfn = sfmmu_ttetopfn(&tte, vaddr);
4425
4426	/*
4427	 * The pfn may not have a page_t underneath in which case we
4428	 * just return it. This can happen if we are doing I/O to a
4429	 * static portion of the kernel's address space, for instance.
4430	 */
4431	pp = osfhmep->hme_page;
4432	if (pp == NULL) {
4433		SFMMU_HASH_UNLOCK(hmebp);
4434		kmem_cache_free(pa_hment_cache, pahmep);
4435		*rpfn = pfn;
4436		if (cookiep)
4437			*cookiep = HAC_COOKIE_NONE;
4438		return (0);
4439	}
4440	ASSERT(pp == PP_PAGEROOT(pp));
4441
4442	vp = pp->p_vnode;
4443	off = pp->p_offset;
4444
4445	pml = sfmmu_mlist_enter(pp);
4446
4447	if (flags & HAC_PAGELOCK) {
4448		if (!page_trylock(pp, SE_SHARED)) {
4449			/*
4450			 * Somebody is holding SE_EXCL lock. Might
4451			 * even be hat_page_relocate(). Drop all
4452			 * our locks, lookup the page in &kvp, and
4453			 * retry. If it doesn't exist in &kvp and &zvp,
4454			 * then we must be dealing with a kernel mapped
4455			 * page which doesn't actually belong to
4456			 * segkmem so we punt.
4457			 */
4458			sfmmu_mlist_exit(pml);
4459			SFMMU_HASH_UNLOCK(hmebp);
4460			pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4461
4462			/* check zvp before giving up */
4463			if (pp == NULL)
4464				pp = page_lookup(&zvp, (u_offset_t)saddr,
4465				    SE_SHARED);
4466
4467			/* Okay, we didn't find it, give up */
4468			if (pp == NULL) {
4469				kmem_cache_free(pa_hment_cache, pahmep);
4470				*rpfn = pfn;
4471				if (cookiep)
4472					*cookiep = HAC_COOKIE_NONE;
4473				return (0);
4474			}
4475			page_unlock(pp);
4476			goto rehash;
4477		}
4478		locked = 1;
4479	}
4480
4481	if (!PAGE_LOCKED(pp) && !panicstr)
4482		panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4483
4484	if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4485	    pp->p_offset != off) {
4486		/*
4487		 * The page moved before we got our hands on it.  Drop
4488		 * all the locks and try again.
4489		 */
4490		ASSERT((flags & HAC_PAGELOCK) != 0);
4491		sfmmu_mlist_exit(pml);
4492		SFMMU_HASH_UNLOCK(hmebp);
4493		page_unlock(pp);
4494		locked = 0;
4495		goto rehash;
4496	}
4497
4498	if (!VN_ISKAS(vp)) {
4499		/*
4500		 * This is not a segkmem page but another page which
4501		 * has been kernel mapped. It had better have at least
4502		 * a share lock on it. Return the pfn.
4503		 */
4504		sfmmu_mlist_exit(pml);
4505		SFMMU_HASH_UNLOCK(hmebp);
4506		if (locked)
4507			page_unlock(pp);
4508		kmem_cache_free(pa_hment_cache, pahmep);
4509		ASSERT(PAGE_LOCKED(pp));
4510		*rpfn = pfn;
4511		if (cookiep)
4512			*cookiep = HAC_COOKIE_NONE;
4513		return (0);
4514	}
4515
4516	/*
4517	 * Setup this pa_hment and link its embedded dummy sf_hment into
4518	 * the mapping list.
4519	 */
4520	pp->p_share++;
4521	pahmep->cb_id = callback_id;
4522	pahmep->addr = vaddr;
4523	pahmep->len = len;
4524	pahmep->refcnt = 1;
4525	pahmep->flags = 0;
4526	pahmep->pvt = pvt;
4527
4528	sfhmep->hme_tte.ll = 0;
4529	sfhmep->hme_data = pahmep;
4530	sfhmep->hme_prev = osfhmep;
4531	sfhmep->hme_next = osfhmep->hme_next;
4532
4533	if (osfhmep->hme_next)
4534		osfhmep->hme_next->hme_prev = sfhmep;
4535
4536	osfhmep->hme_next = sfhmep;
4537
4538	sfmmu_mlist_exit(pml);
4539	SFMMU_HASH_UNLOCK(hmebp);
4540
4541	if (locked)
4542		page_unlock(pp);
4543
4544	*rpfn = pfn;
4545	if (cookiep)
4546		*cookiep = (void *)pahmep;
4547
4548	return (0);
4549}
4550
4551/*
4552 * Remove the relocation callbacks from the specified addr/len.
4553 */
4554void
4555hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4556	void *cookie)
4557{
4558	struct		hmehash_bucket *hmebp;
4559	hmeblk_tag	hblktag;
4560	struct hme_blk	*hmeblkp;
4561	int		hmeshift, hashno;
4562	caddr_t		saddr;
4563	struct pa_hment	*pahmep;
4564	struct sf_hment	*sfhmep, *osfhmep;
4565	kmutex_t	*pml;
4566	tte_t		tte;
4567	page_t		*pp;
4568	vnode_t		*vp;
4569	u_offset_t	off;
4570	int		locked = 0;
4571
4572	/*
4573	 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4574	 * remove so just return.
4575	 */
4576	if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4577		return;
4578
4579	saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4580
4581rehash:
4582	/* Find the mapping(s) for this page */
4583	for (hashno = TTE64K, hmeblkp = NULL;
4584	    hmeblkp == NULL && hashno <= mmu_hashcnt;
4585	    hashno++) {
4586		hmeshift = HME_HASH_SHIFT(hashno);
4587		hblktag.htag_id = ksfmmup;
4588		hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4589		hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4590		hblktag.htag_rehash = hashno;
4591		hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4592
4593		SFMMU_HASH_LOCK(hmebp);
4594
4595		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4596
4597		if (hmeblkp == NULL)
4598			SFMMU_HASH_UNLOCK(hmebp);
4599	}
4600
4601	if (hmeblkp == NULL)
4602		return;
4603
4604	ASSERT(!hmeblkp->hblk_shared);
4605
4606	HBLKTOHME(osfhmep, hmeblkp, saddr);
4607
4608	sfmmu_copytte(&osfhmep->hme_tte, &tte);
4609	if (!TTE_IS_VALID(&tte)) {
4610		SFMMU_HASH_UNLOCK(hmebp);
4611		return;
4612	}
4613
4614	pp = osfhmep->hme_page;
4615	if (pp == NULL) {
4616		SFMMU_HASH_UNLOCK(hmebp);
4617		ASSERT(cookie == NULL);
4618		return;
4619	}
4620
4621	vp = pp->p_vnode;
4622	off = pp->p_offset;
4623
4624	pml = sfmmu_mlist_enter(pp);
4625
4626	if (flags & HAC_PAGELOCK) {
4627		if (!page_trylock(pp, SE_SHARED)) {
4628			/*
4629			 * Somebody is holding SE_EXCL lock. Might
4630			 * even be hat_page_relocate(). Drop all
4631			 * our locks, lookup the page in &kvp, and
4632			 * retry. If it doesn't exist in &kvp and &zvp,
4633			 * then we must be dealing with a kernel mapped
4634			 * page which doesn't actually belong to
4635			 * segkmem so we punt.
4636			 */
4637			sfmmu_mlist_exit(pml);
4638			SFMMU_HASH_UNLOCK(hmebp);
4639			pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4640			/* check zvp before giving up */
4641			if (pp == NULL)
4642				pp = page_lookup(&zvp, (u_offset_t)saddr,
4643				    SE_SHARED);
4644
4645			if (pp == NULL) {
4646				ASSERT(cookie == NULL);
4647				return;
4648			}
4649			page_unlock(pp);
4650			goto rehash;
4651		}
4652		locked = 1;
4653	}
4654
4655	ASSERT(PAGE_LOCKED(pp));
4656
4657	if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4658	    pp->p_offset != off) {
4659		/*
4660		 * The page moved before we got our hands on it.  Drop
4661		 * all the locks and try again.
4662		 */
4663		ASSERT((flags & HAC_PAGELOCK) != 0);
4664		sfmmu_mlist_exit(pml);
4665		SFMMU_HASH_UNLOCK(hmebp);
4666		page_unlock(pp);
4667		locked = 0;
4668		goto rehash;
4669	}
4670
4671	if (!VN_ISKAS(vp)) {
4672		/*
4673		 * This is not a segkmem page but another page which
4674		 * has been kernel mapped.
4675		 */
4676		sfmmu_mlist_exit(pml);
4677		SFMMU_HASH_UNLOCK(hmebp);
4678		if (locked)
4679			page_unlock(pp);
4680		ASSERT(cookie == NULL);
4681		return;
4682	}
4683
4684	if (cookie != NULL) {
4685		pahmep = (struct pa_hment *)cookie;
4686		sfhmep = &pahmep->sfment;
4687	} else {
4688		for (sfhmep = pp->p_mapping; sfhmep != NULL;
4689		    sfhmep = sfhmep->hme_next) {
4690
4691			/*
4692			 * skip va<->pa mappings
4693			 */
4694			if (!IS_PAHME(sfhmep))
4695				continue;
4696
4697			pahmep = sfhmep->hme_data;
4698			ASSERT(pahmep != NULL);
4699
4700			/*
4701			 * if pa_hment matches, remove it
4702			 */
4703			if ((pahmep->pvt == pvt) &&
4704			    (pahmep->addr == vaddr) &&
4705			    (pahmep->len == len)) {
4706				break;
4707			}
4708		}
4709	}
4710
4711	if (sfhmep == NULL) {
4712		if (!panicstr) {
4713			panic("hat_delete_callback: pa_hment not found, pp %p",
4714			    (void *)pp);
4715		}
4716		return;
4717	}
4718
4719	/*
4720	 * Note: at this point a valid kernel mapping must still be
4721	 * present on this page.
4722	 */
4723	pp->p_share--;
4724	if (pp->p_share <= 0)
4725		panic("hat_delete_callback: zero p_share");
4726
4727	if (--pahmep->refcnt == 0) {
4728		if (pahmep->flags != 0)
4729			panic("hat_delete_callback: pa_hment is busy");
4730
4731		/*
4732		 * Remove sfhmep from the mapping list for the page.
4733		 */
4734		if (sfhmep->hme_prev) {
4735			sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4736		} else {
4737			pp->p_mapping = sfhmep->hme_next;
4738		}
4739
4740		if (sfhmep->hme_next)
4741			sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4742
4743		sfmmu_mlist_exit(pml);
4744		SFMMU_HASH_UNLOCK(hmebp);
4745
4746		if (locked)
4747			page_unlock(pp);
4748
4749		kmem_cache_free(pa_hment_cache, pahmep);
4750		return;
4751	}
4752
4753	sfmmu_mlist_exit(pml);
4754	SFMMU_HASH_UNLOCK(hmebp);
4755	if (locked)
4756		page_unlock(pp);
4757}
4758
4759/*
4760 * hat_probe returns 1 if the translation for the address 'addr' is
4761 * loaded, zero otherwise.
4762 *
 * hat_probe should be used only for advisory purposes because it may
4764 * occasionally return the wrong value. The implementation must guarantee that
4765 * returning the wrong value is a very rare event. hat_probe is used
4766 * to implement optimizations in the segment drivers.
4767 *
4768 */
4769int
4770hat_probe(struct hat *sfmmup, caddr_t addr)
4771{
4772	pfn_t pfn;
4773	tte_t tte;
4774
4775	ASSERT(sfmmup != NULL);
4776	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4777
4778	ASSERT((sfmmup == ksfmmup) ||
4779	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4780
4781	if (sfmmup == ksfmmup) {
4782		while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4783		    == PFN_SUSPENDED) {
4784			sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4785		}
4786	} else {
4787		pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4788	}
4789
4790	if (pfn != PFN_INVALID)
4791		return (1);
4792	else
4793		return (0);
4794}
4795
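/*
 * Returns the page size, in bytes, of the mapping for the given address,
 * or -1 if no valid mapping exists.
 */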
4796ssize_t
4797hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4798{
4799	tte_t tte;
4800
4801	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4802
4803	if (sfmmup == ksfmmup) {
4804		if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4805			return (-1);
4806		}
4807	} else {
4808		if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4809			return (-1);
4810		}
4811	}
4812
4813	ASSERT(TTE_IS_VALID(&tte));
4814	return (TTEBYTES(TTE_CSZ(&tte)));
4815}
4816
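/*
 * Returns the attributes of the mapping for the given address in *attr.
 * Returns 0 on success and 0xffffffff if no valid mapping exists.
 */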
4817uint_t
4818hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4819{
4820	tte_t tte;
4821
4822	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4823
4824	if (sfmmup == ksfmmup) {
4825		if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4826			tte.ll = 0;
4827		}
4828	} else {
4829		if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4830			tte.ll = 0;
4831		}
4832	}
4833	if (TTE_IS_VALID(&tte)) {
4834		*attr = sfmmu_ptov_attr(&tte);
4835		return (0);
4836	}
4837	*attr = 0;
4838	return ((uint_t)0xffffffff);
4839}
4840
4841/*
 * Enables more attributes on the specified address range (i.e., logical OR).
4843 */
4844void
4845hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4846{
4847	if (hat->sfmmu_xhat_provider) {
4848		XHAT_SETATTR(hat, addr, len, attr);
4849		return;
4850	} else {
4851		/*
4852		 * This must be a CPU HAT. If the address space has
4853		 * XHATs attached, change attributes for all of them,
4854		 * just in case
4855		 */
4856		ASSERT(hat->sfmmu_as != NULL);
4857		if (hat->sfmmu_as->a_xhat != NULL)
4858			xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
4859	}
4860
4861	sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4862}
4863
4864/*
4865 * Assigns attributes to the specified address range.  All the attributes
4866 * are specified.
4867 */
4868void
4869hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4870{
4871	if (hat->sfmmu_xhat_provider) {
4872		XHAT_CHGATTR(hat, addr, len, attr);
4873		return;
4874	} else {
4875		/*
4876		 * This must be a CPU HAT. If the address space has
4877		 * XHATs attached, change attributes for all of them,
4878		 * just in case
4879		 */
4880		ASSERT(hat->sfmmu_as != NULL);
4881		if (hat->sfmmu_as->a_xhat != NULL)
4882			xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
4883	}
4884
4885	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4886}
4887
4888/*
 * Removes attributes on the specified address range (i.e., logical NAND).
4890 */
4891void
4892hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4893{
4894	if (hat->sfmmu_xhat_provider) {
4895		XHAT_CLRATTR(hat, addr, len, attr);
4896		return;
4897	} else {
4898		/*
4899		 * This must be a CPU HAT. If the address space has
4900		 * XHATs attached, change attributes for all of them,
4901		 * just in case
4902		 */
4903		ASSERT(hat->sfmmu_as != NULL);
4904		if (hat->sfmmu_as->a_xhat != NULL)
4905			xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
4906	}
4907
4908	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4909}
4910
4911/*
4912 * Change attributes on an address range to that specified by attr and mode.
4913 */
4914static void
4915sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4916	int mode)
4917{
4918	struct hmehash_bucket *hmebp;
4919	hmeblk_tag hblktag;
4920	int hmeshift, hashno = 1;
4921	struct hme_blk *hmeblkp, *list = NULL;
4922	caddr_t endaddr;
4923	cpuset_t cpuset;
4924	demap_range_t dmr;
4925
4926	CPUSET_ZERO(cpuset);
4927
4928	ASSERT((sfmmup == ksfmmup) ||
4929	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4930	ASSERT((len & MMU_PAGEOFFSET) == 0);
4931	ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4932
4933	if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4934	    ((addr + len) > (caddr_t)USERLIMIT)) {
4935		panic("user addr %p in kernel space",
4936		    (void *)addr);
4937	}
4938
4939	endaddr = addr + len;
4940	hblktag.htag_id = sfmmup;
4941	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4942	DEMAP_RANGE_INIT(sfmmup, &dmr);
4943
4944	while (addr < endaddr) {
4945		hmeshift = HME_HASH_SHIFT(hashno);
4946		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4947		hblktag.htag_rehash = hashno;
4948		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4949
4950		SFMMU_HASH_LOCK(hmebp);
4951
4952		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4953		if (hmeblkp != NULL) {
4954			ASSERT(!hmeblkp->hblk_shared);
4955			/*
4956			 * We've encountered a shadow hmeblk so skip the range
4957			 * of the next smaller mapping size.
4958			 */
4959			if (hmeblkp->hblk_shw_bit) {
4960				ASSERT(sfmmup != ksfmmup);
4961				ASSERT(hashno > 1);
4962				addr = (caddr_t)P2END((uintptr_t)addr,
4963				    TTEBYTES(hashno - 1));
4964			} else {
4965				addr = sfmmu_hblk_chgattr(sfmmup,
4966				    hmeblkp, addr, endaddr, &dmr, attr, mode);
4967			}
4968			SFMMU_HASH_UNLOCK(hmebp);
4969			hashno = 1;
4970			continue;
4971		}
4972		SFMMU_HASH_UNLOCK(hmebp);
4973
4974		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4975			/*
4976			 * We have traversed the whole list and rehashed
4977			 * if necessary without finding the address to chgattr.
			 * This is ok, so we increment the address by the
			 * smallest hmeblk range for kernel mappings or for
			 * user mappings with no large pages, or by the
			 * largest hmeblk range (to account for shadow
			 * hmeblks) for user mappings with large pages,
			 * and continue.
4983			 */
4984			if (sfmmup == ksfmmup)
4985				addr = (caddr_t)P2END((uintptr_t)addr,
4986				    TTEBYTES(1));
4987			else
4988				addr = (caddr_t)P2END((uintptr_t)addr,
4989				    TTEBYTES(hashno));
4990			hashno = 1;
4991		} else {
4992			hashno++;
4993		}
4994	}
4995
4996	sfmmu_hblks_list_purge(&list, 0);
4997	DEMAP_RANGE_FLUSH(&dmr);
4998	cpuset = sfmmup->sfmmu_cpusran;
4999	xt_sync(cpuset);
5000}
5001
5002/*
 * This function changes attributes on a range of addresses in an hmeblk.  It
 * returns the next address whose attributes need to be changed.
5005 * It should be called with the hash lock held.
5006 * XXX It should be possible to optimize chgattr by not flushing every time but
5007 * on the other hand:
5008 * 1. do one flush crosscall.
5009 * 2. only flush if we are increasing permissions (make sure this will work)
5010 */
5011static caddr_t
5012sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5013	caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
5014{
5015	tte_t tte, tteattr, tteflags, ttemod;
5016	struct sf_hment *sfhmep;
5017	int ttesz;
5018	struct page *pp = NULL;
5019	kmutex_t *pml, *pmtx;
5020	int ret;
5021	int use_demap_range;
5022#if defined(SF_ERRATA_57)
5023	int check_exec;
5024#endif
5025
5026	ASSERT(in_hblk_range(hmeblkp, addr));
5027	ASSERT(hmeblkp->hblk_shw_bit == 0);
5028	ASSERT(!hmeblkp->hblk_shared);
5029
5030	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5031	ttesz = get_hblk_ttesz(hmeblkp);
5032
5033	/*
5034	 * Flush the current demap region if addresses have been
5035	 * skipped or the page size doesn't match.
5036	 */
5037	use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
5038	if (use_demap_range) {
5039		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5040	} else {
5041		DEMAP_RANGE_FLUSH(dmrp);
5042	}
5043
5044	tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
5045#if defined(SF_ERRATA_57)
5046	check_exec = (sfmmup != ksfmmup) &&
5047	    AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5048	    TTE_IS_EXECUTABLE(&tteattr);
5049#endif
5050	HBLKTOHME(sfhmep, hmeblkp, addr);
5051	while (addr < endaddr) {
5052		sfmmu_copytte(&sfhmep->hme_tte, &tte);
5053		if (TTE_IS_VALID(&tte)) {
5054			if ((tte.ll & tteflags.ll) == tteattr.ll) {
5055				/*
5056				 * if the new attr is the same as old
5057				 * continue
5058				 */
5059				goto next_addr;
5060			}
5061			if (!TTE_IS_WRITABLE(&tteattr)) {
5062				/*
				 * make sure we clear the hw modify bit if we
				 * are removing write permission
5065				 */
5066				tteflags.tte_intlo |= TTE_HWWR_INT;
5067			}
5068
5069			pml = NULL;
5070			pp = sfhmep->hme_page;
5071			if (pp) {
5072				pml = sfmmu_mlist_enter(pp);
5073			}
5074
5075			if (pp != sfhmep->hme_page) {
5076				/*
5077				 * tte must have been unloaded.
5078				 */
5079				ASSERT(pml);
5080				sfmmu_mlist_exit(pml);
5081				continue;
5082			}
5083
5084			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5085
5086			ttemod = tte;
5087			ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
5088			ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
5089
5090#if defined(SF_ERRATA_57)
5091			if (check_exec && addr < errata57_limit)
5092				ttemod.tte_exec_perm = 0;
5093#endif
5094			ret = sfmmu_modifytte_try(&tte, &ttemod,
5095			    &sfhmep->hme_tte);
5096
5097			if (ret < 0) {
5098				/* tte changed underneath us */
5099				if (pml) {
5100					sfmmu_mlist_exit(pml);
5101				}
5102				continue;
5103			}
5104
5105			if (tteflags.tte_intlo & TTE_HWWR_INT) {
5106				/*
5107				 * need to sync if we are clearing modify bit.
5108				 */
5109				sfmmu_ttesync(sfmmup, addr, &tte, pp);
5110			}
5111
5112			if (pp && PP_ISRO(pp)) {
5113				if (tteattr.tte_intlo & TTE_WRPRM_INT) {
5114					pmtx = sfmmu_page_enter(pp);
5115					PP_CLRRO(pp);
5116					sfmmu_page_exit(pmtx);
5117				}
5118			}
5119
5120			if (ret > 0 && use_demap_range) {
5121				DEMAP_RANGE_MARKPG(dmrp, addr);
5122			} else if (ret > 0) {
5123				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5124			}
5125
5126			if (pml) {
5127				sfmmu_mlist_exit(pml);
5128			}
5129		}
5130next_addr:
5131		addr += TTEBYTES(ttesz);
5132		sfhmep++;
5133		DEMAP_RANGE_NEXTPG(dmrp);
5134	}
5135	return (addr);
5136}
5137
5138/*
5139 * This routine converts virtual attributes to physical ones.  It will
5140 * update the tteflags field with the tte mask corresponding to the attributes
5141 * affected and it returns the new attributes.  It will also clear the modify
5142 * bit if we are taking away write permission.  This is necessary since the
5143 * modify bit is the hardware permission bit and we need to clear it in order
5144 * to detect write faults.
5145 */
5146static uint64_t
5147sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
5148{
5149	tte_t ttevalue;
5150
5151	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
5152
5153	switch (mode) {
5154	case SFMMU_CHGATTR:
5155		/* all attributes specified */
5156		ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
5157		ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
5158		ttemaskp->tte_inthi = TTEINTHI_ATTR;
5159		ttemaskp->tte_intlo = TTEINTLO_ATTR;
5160		break;
5161	case SFMMU_SETATTR:
5162		ASSERT(!(attr & ~HAT_PROT_MASK));
5163		ttemaskp->ll = 0;
5164		ttevalue.ll = 0;
5165		/*
5166		 * a valid tte implies exec and read for sfmmu
5167		 * so no need to do anything about them.
		 * since privileged access implies user access,
		 * PROT_USER doesn't make sense either.
5170		 */
5171		if (attr & PROT_WRITE) {
5172			ttemaskp->tte_intlo |= TTE_WRPRM_INT;
5173			ttevalue.tte_intlo |= TTE_WRPRM_INT;
5174		}
5175		break;
5176	case SFMMU_CLRATTR:
		/* attributes will be nand'ed with the current ones */
5178		if (attr & ~(PROT_WRITE | PROT_USER)) {
5179			panic("sfmmu: attr %x not supported", attr);
5180		}
5181		ttemaskp->ll = 0;
5182		ttevalue.ll = 0;
5183		if (attr & PROT_WRITE) {
5184			/* clear both writable and modify bit */
5185			ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
5186		}
5187		if (attr & PROT_USER) {
5188			ttemaskp->tte_intlo |= TTE_PRIV_INT;
5189			ttevalue.tte_intlo |= TTE_PRIV_INT;
5190		}
5191		break;
5192	default:
5193		panic("sfmmu_vtop_attr: bad mode %x", mode);
5194	}
5195	ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
5196	return (ttevalue.ll);
5197}
5198
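/*
 * Converts the physical attributes of a valid tte back to the
 * corresponding virtual attribute flags (PROT_*, HAT_*, SFMMU_*).
 */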
5199static uint_t
5200sfmmu_ptov_attr(tte_t *ttep)
5201{
5202	uint_t attr;
5203
5204	ASSERT(TTE_IS_VALID(ttep));
5205
5206	attr = PROT_READ;
5207
5208	if (TTE_IS_WRITABLE(ttep)) {
5209		attr |= PROT_WRITE;
5210	}
5211	if (TTE_IS_EXECUTABLE(ttep)) {
5212		attr |= PROT_EXEC;
5213	}
5214	if (!TTE_IS_PRIVILEGED(ttep)) {
5215		attr |= PROT_USER;
5216	}
5217	if (TTE_IS_NFO(ttep)) {
5218		attr |= HAT_NOFAULT;
5219	}
5220	if (TTE_IS_NOSYNC(ttep)) {
5221		attr |= HAT_NOSYNC;
5222	}
5223	if (TTE_IS_SIDEFFECT(ttep)) {
5224		attr |= SFMMU_SIDEFFECT;
5225	}
5226	if (!TTE_IS_VCACHEABLE(ttep)) {
5227		attr |= SFMMU_UNCACHEVTTE;
5228	}
5229	if (!TTE_IS_PCACHEABLE(ttep)) {
5230		attr |= SFMMU_UNCACHEPTTE;
5231	}
5232	return (attr);
5233}
5234
5235/*
5236 * hat_chgprot is a deprecated hat call.  New segment drivers
5237 * should store all attributes and use hat_*attr calls.
5238 *
5239 * Change the protections in the virtual address range
5240 * given to the specified virtual protection.  If vprot is ~PROT_WRITE,
5241 * then remove write permission, leaving the other
5242 * permissions unchanged.  If vprot is ~PROT_USER, remove user permissions.
5243 *
5244 */
5245void
5246hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5247{
5248	struct hmehash_bucket *hmebp;
5249	hmeblk_tag hblktag;
5250	int hmeshift, hashno = 1;
5251	struct hme_blk *hmeblkp, *list = NULL;
5252	caddr_t endaddr;
5253	cpuset_t cpuset;
5254	demap_range_t dmr;
5255
5256	ASSERT((len & MMU_PAGEOFFSET) == 0);
5257	ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5258
5259	if (sfmmup->sfmmu_xhat_provider) {
5260		XHAT_CHGPROT(sfmmup, addr, len, vprot);
5261		return;
5262	} else {
5263		/*
5264		 * This must be a CPU HAT. If the address space has
5265		 * XHATs attached, change attributes for all of them,
5266		 * just in case
5267		 */
5268		ASSERT(sfmmup->sfmmu_as != NULL);
5269		if (sfmmup->sfmmu_as->a_xhat != NULL)
5270			xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
5271	}
5272
5273	CPUSET_ZERO(cpuset);
5274
5275	if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5276	    ((addr + len) > (caddr_t)USERLIMIT)) {
5277		panic("user addr %p vprot %x in kernel space",
5278		    (void *)addr, vprot);
5279	}
5280	endaddr = addr + len;
5281	hblktag.htag_id = sfmmup;
5282	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5283	DEMAP_RANGE_INIT(sfmmup, &dmr);
5284
5285	while (addr < endaddr) {
5286		hmeshift = HME_HASH_SHIFT(hashno);
5287		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5288		hblktag.htag_rehash = hashno;
5289		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5290
5291		SFMMU_HASH_LOCK(hmebp);
5292
5293		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5294		if (hmeblkp != NULL) {
5295			ASSERT(!hmeblkp->hblk_shared);
5296			/*
5297			 * We've encountered a shadow hmeblk so skip the range
5298			 * of the next smaller mapping size.
5299			 */
5300			if (hmeblkp->hblk_shw_bit) {
5301				ASSERT(sfmmup != ksfmmup);
5302				ASSERT(hashno > 1);
5303				addr = (caddr_t)P2END((uintptr_t)addr,
5304				    TTEBYTES(hashno - 1));
5305			} else {
5306				addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5307				    addr, endaddr, &dmr, vprot);
5308			}
5309			SFMMU_HASH_UNLOCK(hmebp);
5310			hashno = 1;
5311			continue;
5312		}
5313		SFMMU_HASH_UNLOCK(hmebp);
5314
5315		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5316			/*
5317			 * We have traversed the whole list and rehashed
5318			 * if necessary without finding the address to chgprot.
			 * This is ok, so we increment the address by the
			 * smallest hmeblk range for kernel mappings, or by
			 * the largest hmeblk range (to account for shadow
			 * hmeblks) for user mappings, and continue.
5323			 */
5324			if (sfmmup == ksfmmup)
5325				addr = (caddr_t)P2END((uintptr_t)addr,
5326				    TTEBYTES(1));
5327			else
5328				addr = (caddr_t)P2END((uintptr_t)addr,
5329				    TTEBYTES(hashno));
5330			hashno = 1;
5331		} else {
5332			hashno++;
5333		}
5334	}
5335
5336	sfmmu_hblks_list_purge(&list, 0);
5337	DEMAP_RANGE_FLUSH(&dmr);
5338	cpuset = sfmmup->sfmmu_cpusran;
5339	xt_sync(cpuset);
5340}
5341
5342/*
 * This function chgprots a range of addresses in an hmeblk.  It returns the
 * next address whose protections need to be changed.
 * It should be called with the hash lock held.
 * XXX It should be possible to optimize chgprot by not flushing every time but
5347 * on the other hand:
5348 * 1. do one flush crosscall.
5349 * 2. only flush if we are increasing permissions (make sure this will work)
5350 */
5351static caddr_t
5352sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5353	caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5354{
5355	uint_t pprot;
5356	tte_t tte, ttemod;
5357	struct sf_hment *sfhmep;
5358	uint_t tteflags;
5359	int ttesz;
5360	struct page *pp = NULL;
5361	kmutex_t *pml, *pmtx;
5362	int ret;
5363	int use_demap_range;
5364#if defined(SF_ERRATA_57)
5365	int check_exec;
5366#endif
5367
5368	ASSERT(in_hblk_range(hmeblkp, addr));
5369	ASSERT(hmeblkp->hblk_shw_bit == 0);
5370	ASSERT(!hmeblkp->hblk_shared);
5371
5372#ifdef DEBUG
5373	if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5374	    (endaddr < get_hblk_endaddr(hmeblkp))) {
5375		panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5376	}
5377#endif /* DEBUG */
5378
5379	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5380	ttesz = get_hblk_ttesz(hmeblkp);
5381
5382	pprot = sfmmu_vtop_prot(vprot, &tteflags);
5383#if defined(SF_ERRATA_57)
5384	check_exec = (sfmmup != ksfmmup) &&
5385	    AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5386	    ((vprot & PROT_EXEC) == PROT_EXEC);
5387#endif
5388	HBLKTOHME(sfhmep, hmeblkp, addr);
5389
5390	/*
5391	 * Flush the current demap region if addresses have been
5392	 * skipped or the page size doesn't match.
5393	 */
5394	use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5395	if (use_demap_range) {
5396		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5397	} else {
5398		DEMAP_RANGE_FLUSH(dmrp);
5399	}
5400
5401	while (addr < endaddr) {
5402		sfmmu_copytte(&sfhmep->hme_tte, &tte);
5403		if (TTE_IS_VALID(&tte)) {
5404			if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5405				/*
5406				 * If the new protection is the same as the
5407				 * old one, continue to the next address.
5408				 */
5409				goto next_addr;
5410			}
5411			pml = NULL;
5412			pp = sfhmep->hme_page;
5413			if (pp) {
5414				pml = sfmmu_mlist_enter(pp);
5415			}
5416			if (pp != sfhmep->hme_page) {
5417				/*
5418				 * tte must have been unloaded
5419				 * underneath us.  Recheck
5420				 */
5421				ASSERT(pml);
5422				sfmmu_mlist_exit(pml);
5423				continue;
5424			}
5425
5426			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5427
5428			ttemod = tte;
5429			TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5430#if defined(SF_ERRATA_57)
5431			if (check_exec && addr < errata57_limit)
5432				ttemod.tte_exec_perm = 0;
5433#endif
5434			ret = sfmmu_modifytte_try(&tte, &ttemod,
5435			    &sfhmep->hme_tte);
5436
5437			if (ret < 0) {
5438				/* tte changed underneath us */
5439				if (pml) {
5440					sfmmu_mlist_exit(pml);
5441				}
5442				continue;
5443			}
5444
5445			if (tteflags & TTE_HWWR_INT) {
5446				/*
5447				 * Need to sync if we are clearing the modify bit.
5448				 */
5449				sfmmu_ttesync(sfmmup, addr, &tte, pp);
5450			}
5451
5452			if (pp && PP_ISRO(pp)) {
5453				if (pprot & TTE_WRPRM_INT) {
5454					pmtx = sfmmu_page_enter(pp);
5455					PP_CLRRO(pp);
5456					sfmmu_page_exit(pmtx);
5457				}
5458			}
5459
5460			if (ret > 0 && use_demap_range) {
5461				DEMAP_RANGE_MARKPG(dmrp, addr);
5462			} else if (ret > 0) {
5463				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5464			}
5465
5466			if (pml) {
5467				sfmmu_mlist_exit(pml);
5468			}
5469		}
5470next_addr:
5471		addr += TTEBYTES(ttesz);
5472		sfhmep++;
5473		DEMAP_RANGE_NEXTPG(dmrp);
5474	}
5475	return (addr);
5476}
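
/*
 * A minimal sketch of the copy/modify/compare-and-swap retry pattern
 * that sfmmu_hblk_chgprot() builds on via sfmmu_copytte() and
 * sfmmu_modifytte_try(): snapshot the TTE, modify the private copy,
 * then attempt an atomic swap, retrying whenever the TTE changed
 * underneath us.  The example_tte_t type and example_set_flags name
 * are hypothetical, and this uses C11 atomics rather than the
 * kernel's cas primitives.
 */
#include <stdatomic.h>

typedef _Atomic unsigned long long example_tte_t;

static void
example_set_flags(example_tte_t *ttep, unsigned long long flags)
{
	unsigned long long oldv, newv;

	oldv = atomic_load(ttep);
	do {
		newv = oldv | flags;	/* modify the private copy */
		/* on failure, oldv is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(ttep, &oldv, newv));
}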
5477
5478/*
5479 * This routine is deprecated and should only be used by hat_chgprot.
5480 * The correct routine is sfmmu_vtop_attr.
5481 * This routine converts virtual page protections to physical ones.  It will
5482 * update the tteflags field with the tte mask corresponding to the protections
5483 * affected and it returns the new protections.  It will also clear the modify
5484 * bit if we are taking away write permission.  This is necessary since the
5485 * modify bit is the hardware permission bit and we need to clear it in order
5486 * to detect write faults.
5487 * It accepts the following special protections:
5488 * ~PROT_WRITE = remove write permissions.
5489 * ~PROT_USER = remove user permissions.
5490 */
5491static uint_t
5492sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5493{
5494	if (vprot == (uint_t)~PROT_WRITE) {
5495		*tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5496		return (0);		/* will cause wrprm to be cleared */
5497	}
5498	if (vprot == (uint_t)~PROT_USER) {
5499		*tteflagsp = TTE_PRIV_INT;
5500		return (0);		/* will cause privprm to be cleared */
5501	}
5502	if ((vprot == 0) || (vprot == PROT_USER) ||
5503	    ((vprot & PROT_ALL) != vprot)) {
5504		panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5505	}
5506
5507	switch (vprot) {
5508	case (PROT_READ):
5509	case (PROT_EXEC):
5510	case (PROT_EXEC | PROT_READ):
5511		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5512		return (TTE_PRIV_INT); 		/* set prv and clr wrt */
5513	case (PROT_WRITE):
5514	case (PROT_WRITE | PROT_READ):
5515	case (PROT_EXEC | PROT_WRITE):
5516	case (PROT_EXEC | PROT_WRITE | PROT_READ):
5517		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5518		return (TTE_PRIV_INT | TTE_WRPRM_INT); 	/* set prv and wrt */
5519	case (PROT_USER | PROT_READ):
5520	case (PROT_USER | PROT_EXEC):
5521	case (PROT_USER | PROT_EXEC | PROT_READ):
5522		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5523		return (0); 			/* clr prv and wrt */
5524	case (PROT_USER | PROT_WRITE):
5525	case (PROT_USER | PROT_WRITE | PROT_READ):
5526	case (PROT_USER | PROT_EXEC | PROT_WRITE):
5527	case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5528		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5529		return (TTE_WRPRM_INT); 	/* clr prv and set wrt */
5530	default:
5531		panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5532	}
5533	return (0);
5534}
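
/*
 * Usage sketch for the special encodings accepted above: a caller that
 * only wants to revoke write access passes the one's complement of
 * PROT_WRITE instead of a PROT_* combination.  The wrapper name and
 * its arguments are hypothetical.
 */
static void
example_write_protect(struct hat *hat, caddr_t addr, size_t len)
{
	/* remove write permission (and the hw modify bit) only */
	hat_chgprot(hat, addr, len, (uint_t)~PROT_WRITE);
}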
5535
5536/*
5537 * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5538 * the normal algorithm would take too long for a very large VA range with
5539 * few real mappings. This routine just walks thru all HMEs in the global
5540 * hash table to find and remove mappings.
5541 */
5542static void
5543hat_unload_large_virtual(
5544	struct hat		*sfmmup,
5545	caddr_t			startaddr,
5546	size_t			len,
5547	uint_t			flags,
5548	hat_callback_t		*callback)
5549{
5550	struct hmehash_bucket *hmebp;
5551	struct hme_blk *hmeblkp;
5552	struct hme_blk *pr_hblk = NULL;
5553	struct hme_blk *nx_hblk;
5554	struct hme_blk *list = NULL;
5555	int i;
5556	demap_range_t dmr, *dmrp;
5557	cpuset_t cpuset;
5558	caddr_t	endaddr = startaddr + len;
5559	caddr_t	sa;
5560	caddr_t	ea;
5561	caddr_t	cb_sa[MAX_CB_ADDR];
5562	caddr_t	cb_ea[MAX_CB_ADDR];
5563	int	addr_cnt = 0;
5564	int	a = 0;
5565
5566	if (sfmmup->sfmmu_free) {
5567		dmrp = NULL;
5568	} else {
5569		dmrp = &dmr;
5570		DEMAP_RANGE_INIT(sfmmup, dmrp);
5571	}
5572
5573	/*
5574	 * Loop through all the hash buckets of HME blocks looking for matches.
5575	 */
5576	for (i = 0; i <= UHMEHASH_SZ; i++) {
5577		hmebp = &uhme_hash[i];
5578		SFMMU_HASH_LOCK(hmebp);
5579		hmeblkp = hmebp->hmeblkp;
5580		pr_hblk = NULL;
5581		while (hmeblkp) {
5582			nx_hblk = hmeblkp->hblk_next;
5583
5584			/*
5585			 * skip if not this context, if a shadow block or
5586			 * if the mapping is not in the requested range
5587			 */
5588			if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5589			    hmeblkp->hblk_shw_bit ||
5590			    (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5591			    (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5592				pr_hblk = hmeblkp;
5593				goto next_block;
5594			}
5595
5596			ASSERT(!hmeblkp->hblk_shared);
5597			/*
5598			 * unload if there are any current valid mappings
5599			 */
5600			if (hmeblkp->hblk_vcnt != 0 ||
5601			    hmeblkp->hblk_hmecnt != 0)
5602				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5603				    sa, ea, dmrp, flags);
5604
5605			/*
5606			 * on unmap we also release the HME block itself, once
5607			 * all mappings are gone.
5608			 */
5609			if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5610			    !hmeblkp->hblk_vcnt &&
5611			    !hmeblkp->hblk_hmecnt) {
5612				ASSERT(!hmeblkp->hblk_lckcnt);
5613				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5614				    &list, 0);
5615			} else {
5616				pr_hblk = hmeblkp;
5617			}
5618
5619			if (callback == NULL)
5620				goto next_block;
5621
5622			/*
5623			 * HME blocks may span more than one page, but we may be
5624			 * unmapping only one page, so check for a smaller range
5625			 * for the callback
5626			 */
5627			if (sa < startaddr)
5628				sa = startaddr;
5629			if (--ea > endaddr)
5630				ea = endaddr - 1;
5631
5632			cb_sa[addr_cnt] = sa;
5633			cb_ea[addr_cnt] = ea;
5634			if (++addr_cnt == MAX_CB_ADDR) {
5635				if (dmrp != NULL) {
5636					DEMAP_RANGE_FLUSH(dmrp);
5637					cpuset = sfmmup->sfmmu_cpusran;
5638					xt_sync(cpuset);
5639				}
5640
5641				for (a = 0; a < MAX_CB_ADDR; ++a) {
5642					callback->hcb_start_addr = cb_sa[a];
5643					callback->hcb_end_addr = cb_ea[a];
5644					callback->hcb_function(callback);
5645				}
5646				addr_cnt = 0;
5647			}
5648
5649next_block:
5650			hmeblkp = nx_hblk;
5651		}
5652		SFMMU_HASH_UNLOCK(hmebp);
5653	}
5654
5655	sfmmu_hblks_list_purge(&list, 0);
5656	if (dmrp != NULL) {
5657		DEMAP_RANGE_FLUSH(dmrp);
5658		cpuset = sfmmup->sfmmu_cpusran;
5659		xt_sync(cpuset);
5660	}
5661
5662	for (a = 0; a < addr_cnt; ++a) {
5663		callback->hcb_start_addr = cb_sa[a];
5664		callback->hcb_end_addr = cb_ea[a];
5665		callback->hcb_function(callback);
5666	}
5667
5668	/*
5669	 * Check TSB and TLB page sizes if the process isn't exiting.
5670	 */
5671	if (!sfmmup->sfmmu_free)
5672		sfmmu_check_page_sizes(sfmmup, 0);
5673}
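
/*
 * A minimal sketch of the callback-batching idiom used above: collect
 * up to MAX_CB_ADDR start/end pairs, have the caller flush pending
 * demaps and xt_sync() once, then deliver the callback for each saved
 * range, amortizing the cross-call cost over the whole batch.  Only
 * the delivery loop is sketched here; the function name is
 * hypothetical and cnt is the number of valid entries.
 */
static void
example_deliver_batch(hat_callback_t *cb, caddr_t *cb_sa, caddr_t *cb_ea,
    int cnt)
{
	int a;

	for (a = 0; a < cnt; ++a) {
		cb->hcb_start_addr = cb_sa[a];
		cb->hcb_end_addr = cb_ea[a];
		cb->hcb_function(cb);
	}
}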
5674
5675/*
5676 * Unload all the mappings in the range [addr..addr+len). addr and len must
5677 * be MMU_PAGESIZE aligned.
5678 */
5679
5680extern struct seg *segkmap;
5681#define	ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5682segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
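
/*
 * Sketch of the heuristic hat_unload_callback() applies below: when
 * the number of 4M-step probes needed to cover a range exceeds the
 * number of user hash buckets, scanning the whole hash table via
 * hat_unload_large_virtual() is cheaper than probing the range.  The
 * predicate name is hypothetical.
 */
static int
example_use_large_unload(size_t len)
{
	return ((len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ);
}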
5683
5684
5685void
5686hat_unload_callback(
5687	struct hat *sfmmup,
5688	caddr_t addr,
5689	size_t len,
5690	uint_t flags,
5691	hat_callback_t *callback)
5692{
5693	struct hmehash_bucket *hmebp;
5694	hmeblk_tag hblktag;
5695	int hmeshift, hashno, iskernel;
5696	struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5697	caddr_t endaddr;
5698	cpuset_t cpuset;
5699	int addr_count = 0;
5700	int a;
5701	caddr_t cb_start_addr[MAX_CB_ADDR];
5702	caddr_t cb_end_addr[MAX_CB_ADDR];
5703	int issegkmap = ISSEGKMAP(sfmmup, addr);
5704	demap_range_t dmr, *dmrp;
5705
5706	if (sfmmup->sfmmu_xhat_provider) {
5707		XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
5708		return;
5709	} else {
5710		/*
5711		 * This must be a CPU HAT. If the address space has
5712		 * XHATs attached, unload the mappings for all of them,
5713		 * just in case.
5714		 */
5715		ASSERT(sfmmup->sfmmu_as != NULL);
5716		if (sfmmup->sfmmu_as->a_xhat != NULL)
5717			xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
5718			    len, flags, callback);
5719	}
5720
5721	ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) ||
5722	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5723
5724	ASSERT(sfmmup != NULL);
5725	ASSERT((len & MMU_PAGEOFFSET) == 0);
5726	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5727
5728	/*
5729	 * Probing through a large VA range (say 63 bits) will be slow, even
5730	 * at 4 Meg steps between the probes. So, when the virtual address range
5731	 * is very large, search the HME entries for what to unload.
5732	 *
5733	 *	len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5734	 *
5735	 *	UHMEHASH_SZ is number of hash buckets to examine
5736	 *
5737	 */
5738	if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5739		hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5740		return;
5741	}
5742
5743	CPUSET_ZERO(cpuset);
5744
5745	/*
5746	 * If the process is exiting, we can save a lot of fuss since
5747	 * we'll flush the TLB when we free the ctx anyway.
5748	 */
5749	if (sfmmup->sfmmu_free)
5750		dmrp = NULL;
5751	else
5752		dmrp = &dmr;
5753
5754	DEMAP_RANGE_INIT(sfmmup, dmrp);
5755	endaddr = addr + len;
5756	hblktag.htag_id = sfmmup;
5757	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5758
5759	/*
5760	 * It is likely for the vm to call unload over a wide range of
5761	 * addresses that are actually very sparsely populated by
5762	 * translations.  In order to speed this up the sfmmu hat supports
5763	 * the concept of shadow hmeblks. Dummy large page hmeblks that
5764	 * correspond to actual small translations are allocated at tteload
5765	 * time and are referred to as shadow hmeblks.  Now, during unload
5766	 * time, we first check if we have a shadow hmeblk for that
5767	 * translation.  The absence of one means the corresponding address
5768	 * range is empty and can be skipped.
5769	 *
5770	 * The kernel is an exception to the above statement; that is why
5771	 * for kernel mappings we don't use shadow hmeblks and instead hash
5772	 * starting from the smallest page size.
5773	 */
5774	if (sfmmup == KHATID) {
5775		iskernel = 1;
5776		hashno = TTE64K;
5777	} else {
5778		iskernel = 0;
5779		if (mmu_page_sizes == max_mmu_page_sizes) {
5780			hashno = TTE256M;
5781		} else {
5782			hashno = TTE4M;
5783		}
5784	}
5785	while (addr < endaddr) {
5786		hmeshift = HME_HASH_SHIFT(hashno);
5787		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5788		hblktag.htag_rehash = hashno;
5789		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5790
5791		SFMMU_HASH_LOCK(hmebp);
5792
5793		HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5794		if (hmeblkp == NULL) {
5795			/*
5796			 * didn't find an hmeblk. skip the appropriate
5797			 * address range.
5798			 */
5799			SFMMU_HASH_UNLOCK(hmebp);
5800			if (iskernel) {
5801				if (hashno < mmu_hashcnt) {
5802					hashno++;
5803					continue;
5804				} else {
5805					hashno = TTE64K;
5806					addr = (caddr_t)roundup((uintptr_t)addr
5807					    + 1, MMU_PAGESIZE64K);
5808					continue;
5809				}
5810			}
5811			addr = (caddr_t)roundup((uintptr_t)addr + 1,
5812			    (1 << hmeshift));
5813			if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5814				ASSERT(hashno == TTE64K);
5815				continue;
5816			}
5817			if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5818				hashno = TTE512K;
5819				continue;
5820			}
5821			if (mmu_page_sizes == max_mmu_page_sizes) {
5822				if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5823					hashno = TTE4M;
5824					continue;
5825				}
5826				if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5827					hashno = TTE32M;
5828					continue;
5829				}
5830				hashno = TTE256M;
5831				continue;
5832			} else {
5833				hashno = TTE4M;
5834				continue;
5835			}
5836		}
5837		ASSERT(hmeblkp);
5838		ASSERT(!hmeblkp->hblk_shared);
5839		if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5840			/*
5841			 * If the valid count is zero we can skip the range
5842			 * mapped by this hmeblk.
5843			 * We free hblks in the case of HAT_UNMAP.  HAT_UNMAP
5844			 * is used by segment drivers as a hint
5845			 * that the mapping resource won't be used any longer.
5846			 * The best example of this is during exit().
5847			 */
5848			addr = (caddr_t)roundup((uintptr_t)addr + 1,
5849			    get_hblk_span(hmeblkp));
5850			if ((flags & HAT_UNLOAD_UNMAP) ||
5851			    (iskernel && !issegkmap)) {
5852				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5853				    &list, 0);
5854			}
5855			SFMMU_HASH_UNLOCK(hmebp);
5856
5857			if (iskernel) {
5858				hashno = TTE64K;
5859				continue;
5860			}
5861			if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5862				ASSERT(hashno == TTE64K);
5863				continue;
5864			}
5865			if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5866				hashno = TTE512K;
5867				continue;
5868			}
5869			if (mmu_page_sizes == max_mmu_page_sizes) {
5870				if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5871					hashno = TTE4M;
5872					continue;
5873				}
5874				if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5875					hashno = TTE32M;
5876					continue;
5877				}
5878				hashno = TTE256M;
5879				continue;
5880			} else {
5881				hashno = TTE4M;
5882				continue;
5883			}
5884		}
5885		if (hmeblkp->hblk_shw_bit) {
5886			/*
5887			 * If we encounter a shadow hmeblk we know there are
5888			 * smaller hmeblks mapping the same address space.
5889			 * Decrement the hash size and rehash.
5890			 */
5891			ASSERT(sfmmup != KHATID);
5892			hashno--;
5893			SFMMU_HASH_UNLOCK(hmebp);
5894			continue;
5895		}
5896
5897		/*
5898		 * track callback address ranges.
5899		 * only start a new range when it's not contiguous
5900		 */
5901		if (callback != NULL) {
5902			if (addr_count > 0 &&
5903			    addr == cb_end_addr[addr_count - 1])
5904				--addr_count;
5905			else
5906				cb_start_addr[addr_count] = addr;
5907		}
5908
5909		addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5910		    dmrp, flags);
5911
5912		if (callback != NULL)
5913			cb_end_addr[addr_count++] = addr;
5914
5915		if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5916		    !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5917			sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5918		}
5919		SFMMU_HASH_UNLOCK(hmebp);
5920
5921		/*
5922		 * Notify our caller as to exactly which pages
5923		 * have been unloaded. We do these in clumps,
5924		 * to minimize the number of xt_sync()s that need to occur.
5925		 */
5926		if (callback != NULL && addr_count == MAX_CB_ADDR) {
5927			DEMAP_RANGE_FLUSH(dmrp);
5928			if (dmrp != NULL) {
5929				cpuset = sfmmup->sfmmu_cpusran;
5930				xt_sync(cpuset);
5931			}
5932
5933			for (a = 0; a < MAX_CB_ADDR; ++a) {
5934				callback->hcb_start_addr = cb_start_addr[a];
5935				callback->hcb_end_addr = cb_end_addr[a];
5936				callback->hcb_function(callback);
5937			}
5938			addr_count = 0;
5939		}
5940		if (iskernel) {
5941			hashno = TTE64K;
5942			continue;
5943		}
5944		if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5945			ASSERT(hashno == TTE64K);
5946			continue;
5947		}
5948		if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5949			hashno = TTE512K;
5950			continue;
5951		}
5952		if (mmu_page_sizes == max_mmu_page_sizes) {
5953			if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5954				hashno = TTE4M;
5955				continue;
5956			}
5957			if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5958				hashno = TTE32M;
5959				continue;
5960			}
5961			hashno = TTE256M;
5962		} else {
5963			hashno = TTE4M;
5964		}
5965	}
5966
5967	sfmmu_hblks_list_purge(&list, 0);
5968	DEMAP_RANGE_FLUSH(dmrp);
5969	if (dmrp != NULL) {
5970		cpuset = sfmmup->sfmmu_cpusran;
5971		xt_sync(cpuset);
5972	}
5973	if (callback && addr_count != 0) {
5974		for (a = 0; a < addr_count; ++a) {
5975			callback->hcb_start_addr = cb_start_addr[a];
5976			callback->hcb_end_addr = cb_end_addr[a];
5977			callback->hcb_function(callback);
5978		}
5979	}
5980
5981	/*
5982	 * Check TSB and TLB page sizes if the process isn't exiting.
5983	 */
5984	if (!sfmmup->sfmmu_free)
5985		sfmmu_check_page_sizes(sfmmup, 0);
5986}
5987
5988/*
5989 * Unload all the mappings in the range [addr..addr+len). addr and len must
5990 * be MMU_PAGESIZE aligned.
5991 */
5992void
5993hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5994{
5995	if (sfmmup->sfmmu_xhat_provider) {
5996		XHAT_UNLOAD(sfmmup, addr, len, flags);
5997		return;
5998	}
5999	hat_unload_callback(sfmmup, addr, len, flags, NULL);
6000}
6001
6002
6003/*
6004 * Find the largest mapping size for this page.
6005 */
6006int
6007fnd_mapping_sz(page_t *pp)
6008{
6009	int sz;
6010	int p_index;
6011
6012	p_index = PP_MAPINDEX(pp);
6013
6014	sz = 0;
6015	p_index >>= 1;	/* don't care about 8K bit */
6016	for (; p_index; p_index >>= 1) {
6017		sz++;
6018	}
6019
6020	return (sz);
6021}
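
/*
 * Worked example for the p_index walk above, assuming bit i of
 * PP_MAPINDEX() marks an existing mapping of page size class i (bit 0
 * being 8K): for p_index == 0x5 (8K and 512K mappings) the 8K bit is
 * shifted away first, the loop then shifts twice more, and
 * fnd_mapping_sz() returns 2, i.e. TTE512K is the largest mapping
 * size.
 */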
6022
6023/*
6024 * This function unloads a range of addresses for an hmeblk.
6025 * It returns the next address to be unloaded.
6026 * It should be called with the hash lock held.
6027 */
6028static caddr_t
6029sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6030	caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
6031{
6032	tte_t	tte, ttemod;
6033	struct	sf_hment *sfhmep;
6034	int	ttesz;
6035	long	ttecnt;
6036	page_t *pp;
6037	kmutex_t *pml;
6038	int ret;
6039	int use_demap_range;
6040
6041	ASSERT(in_hblk_range(hmeblkp, addr));
6042	ASSERT(!hmeblkp->hblk_shw_bit);
6043	ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
6044	ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
6045	ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
6046
6047#ifdef DEBUG
6048	if (get_hblk_ttesz(hmeblkp) != TTE8K &&
6049	    (endaddr < get_hblk_endaddr(hmeblkp))) {
6050		panic("sfmmu_hblk_unload: partial unload of large page");
6051	}
6052#endif /* DEBUG */
6053
6054	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6055	ttesz = get_hblk_ttesz(hmeblkp);
6056
6057	use_demap_range = ((dmrp == NULL) ||
6058	    (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
6059
6060	if (use_demap_range) {
6061		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
6062	} else {
6063		DEMAP_RANGE_FLUSH(dmrp);
6064	}
6065	ttecnt = 0;
6066	HBLKTOHME(sfhmep, hmeblkp, addr);
6067
6068	while (addr < endaddr) {
6069		pml = NULL;
6070		sfmmu_copytte(&sfhmep->hme_tte, &tte);
6071		if (TTE_IS_VALID(&tte)) {
6072			pp = sfhmep->hme_page;
6073			if (pp != NULL) {
6074				pml = sfmmu_mlist_enter(pp);
6075			}
6076
6077			/*
6078			 * Verify that hme still points to 'pp' now that
6079			 * we hold the p_mapping lock.
6080			 */
6081			if (sfhmep->hme_page != pp) {
6082				if (pp != NULL && sfhmep->hme_page != NULL) {
6083					ASSERT(pml != NULL);
6084					sfmmu_mlist_exit(pml);
6085					/* Re-start this iteration. */
6086					continue;
6087				}
6088				ASSERT((pp != NULL) &&
6089				    (sfhmep->hme_page == NULL));
6090				goto tte_unloaded;
6091			}
6092
6093			/*
6094			 * This point on we have both HASH and p_mapping
6095			 * lock.
6096			 */
6097			ASSERT(pp == sfhmep->hme_page);
6098			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6099
6100			/*
6101			 * We need to loop on modifying the tte because it is
6102			 * possible for pagesync to come along and
6103			 * change the software bits beneath us.
6104			 *
6105			 * Page_unload can also invalidate the tte after
6106			 * we read tte outside of p_mapping lock.
6107			 */
6108again:
6109			ttemod = tte;
6110
6111			TTE_SET_INVALID(&ttemod);
6112			ret = sfmmu_modifytte_try(&tte, &ttemod,
6113			    &sfhmep->hme_tte);
6114
6115			if (ret <= 0) {
6116				if (TTE_IS_VALID(&tte)) {
6117					ASSERT(ret < 0);
6118					goto again;
6119				}
6120				if (pp != NULL) {
6121					panic("sfmmu_hblk_unload: pp = 0x%p "
6122					    "tte became invalid under mlist"
6123					    " lock = 0x%p", (void *)pp,
6124					    (void *)pml);
6125				}
6126				continue;
6127			}
6128
6129			if (!(flags & HAT_UNLOAD_NOSYNC)) {
6130				sfmmu_ttesync(sfmmup, addr, &tte, pp);
6131			}
6132
6133			/*
6134			 * Ok- we invalidated the tte. Do the rest of the job.
6135			 */
6136			ttecnt++;
6137
6138			if (flags & HAT_UNLOAD_UNLOCK) {
6139				ASSERT(hmeblkp->hblk_lckcnt > 0);
6140				atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
6141				HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6142			}
6143
6144			/*
6145			 * Normally we would need to flush the page
6146			 * from the virtual cache at this point in
6147			 * order to prevent a potential cache alias
6148			 * inconsistency.
6149			 * The particular scenario we need to worry
6150			 * about is:
6151			 * Given:  va1 and va2 are two virtual address
6152			 * that alias and map the same physical
6153			 * address.
6154			 * 1.   mapping exists from va1 to pa and data
6155			 * has been read into the cache.
6156			 * 2.   unload va1.
6157			 * 3.   load va2 and modify data using va2.
6158			 * 4    unload va2.
6159			 * 5.   load va1 and reference data.  Unless we
6160			 * flush the data cache when we unload we will
6161			 * get stale data.
6162			 * Fortunately, page coloring eliminates the
6163			 * above scenario by remembering the color a
6164			 * physical page was last or is currently
6165			 * mapped to.  Now, we delay the flush until
6166			 * the loading of translations.  Only when the
6167			 * new translation is of a different color
6168			 * are we forced to flush.
6169			 */
6170			if (use_demap_range) {
6171				/*
6172				 * Mark this page as needing a demap.
6173				 */
6174				DEMAP_RANGE_MARKPG(dmrp, addr);
6175			} else {
6176				ASSERT(sfmmup != NULL);
6177				ASSERT(!hmeblkp->hblk_shared);
6178				sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6179				    sfmmup->sfmmu_free, 0);
6180			}
6181
6182			if (pp) {
6183				/*
6184				 * Remove the hment from the mapping list
6185				 */
6186				ASSERT(hmeblkp->hblk_hmecnt > 0);
6187
6188				/*
6189				 * Again, we cannot
6190				 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6191				 */
6192				HME_SUB(sfhmep, pp);
6193				membar_stst();
6194				atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
6195			}
6196
6197			ASSERT(hmeblkp->hblk_vcnt > 0);
6198			atomic_add_16(&hmeblkp->hblk_vcnt, -1);
6199
6200			ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6201			    !hmeblkp->hblk_lckcnt);
6202
6203#ifdef VAC
6204			if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6205				if (PP_ISTNC(pp)) {
6206					/*
6207					 * If page was temporarily
6208					 * uncached, try to recache
6209					 * it. Note that HME_SUB() was
6210					 * called above so p_index and
6211					 * mlist had been updated.
6212					 */
6213					conv_tnc(pp, ttesz);
6214				} else if (pp->p_mapping == NULL) {
6215					ASSERT(kpm_enable);
6216					/*
6217					 * Page is marked to be in VAC conflict
6218					 * to an existing kpm mapping and/or is
6219					 * kpm mapped using only the regular
6220					 * pagesize.
6221					 */
6222					sfmmu_kpm_hme_unload(pp);
6223				}
6224			}
6225#endif	/* VAC */
6226		} else if ((pp = sfhmep->hme_page) != NULL) {
6227				/*
6228				 * TTE is invalid but the hme
6229				 * still exists.  Let pageunload
6230				 * complete its job.
6231				 */
6232				ASSERT(pml == NULL);
6233				pml = sfmmu_mlist_enter(pp);
6234				if (sfhmep->hme_page != NULL) {
6235					sfmmu_mlist_exit(pml);
6236					continue;
6237				}
6238				ASSERT(sfhmep->hme_page == NULL);
6239		} else if (hmeblkp->hblk_hmecnt != 0) {
6240			/*
6241			 * pageunload may not have finished decrementing
6242			 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
6243			 * wait for pageunload to finish. Rely on pageunload
6244			 * to decrement hblk_hmecnt after hblk_vcnt.
6245			 */
6246			pfn_t pfn = TTE_TO_TTEPFN(&tte);
6247			ASSERT(pml == NULL);
6248			if (pf_is_memory(pfn)) {
6249				pp = page_numtopp_nolock(pfn);
6250				if (pp != NULL) {
6251					pml = sfmmu_mlist_enter(pp);
6252					sfmmu_mlist_exit(pml);
6253					pml = NULL;
6254				}
6255			}
6256		}
6257
6258tte_unloaded:
6259		/*
6260		 * At this point, the tte we are looking at
6261		 * should be unloaded, and hme has been unlinked
6262		 * from page too. This is important because in
6263		 * pageunload, it does ttesync() then HME_SUB.
6264		 * We need to make sure HME_SUB has been completed
6265		 * so we know ttesync() has been completed. Otherwise,
6266		 * at exit time, after return from the hat layer, VM will
6267		 * release the as structure which hat_setstat() (called
6268		 * by ttesync()) needs.
6269		 */
6270#ifdef DEBUG
6271		{
6272			tte_t	dtte;
6273
6274			ASSERT(sfhmep->hme_page == NULL);
6275
6276			sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6277			ASSERT(!TTE_IS_VALID(&dtte));
6278		}
6279#endif
6280
6281		if (pml) {
6282			sfmmu_mlist_exit(pml);
6283		}
6284
6285		addr += TTEBYTES(ttesz);
6286		sfhmep++;
6287		DEMAP_RANGE_NEXTPG(dmrp);
6288	}
6289	/*
6290	 * For shared hmeblks this routine is only called when region is freed
6291	 * and no longer referenced.  So no need to decrement ttecnt
6292	 * in the region structure here.
6293	 */
6294	if (ttecnt > 0 && sfmmup != NULL) {
6295		atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6296	}
6297	return (addr);
6298}
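
/*
 * A minimal, self-contained sketch of the demap-range accumulator that
 * the DEMAP_RANGE_* macros above implement: remember a contiguous run
 * of same-size pages and flush them as one batch instead of issuing a
 * cross-call per page.  The example_dmr type and function are
 * hypothetical simplifications of demap_range_t.
 */
typedef struct example_dmr {
	caddr_t	edmr_addr;	/* start of the pending run */
	size_t	edmr_len;	/* bytes pending */
} example_dmr_t;

static void
example_dmr_markpg(example_dmr_t *dmrp, caddr_t addr, size_t pgsz,
    void (*flush)(caddr_t, size_t))
{
	if (dmrp->edmr_len != 0 &&
	    dmrp->edmr_addr + dmrp->edmr_len != addr) {
		/* run broken: flush what is pending and restart */
		flush(dmrp->edmr_addr, dmrp->edmr_len);
		dmrp->edmr_len = 0;
	}
	if (dmrp->edmr_len == 0)
		dmrp->edmr_addr = addr;
	dmrp->edmr_len += pgsz;
}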
6299
6300/*
6301 * Invalidate a virtual address range for the local CPU.
6302 * For best performance ensure that the va range is completely
6303 * mapped, otherwise the entire TLB will be flushed.
6304 */
6305void
6306hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6307{
6308	ssize_t sz;
6309	caddr_t endva = va + size;
6310
6311	while (va < endva) {
6312		sz = hat_getpagesize(sfmmup, va);
6313		if (sz < 0) {
6314			vtag_flushall();
6315			break;
6316		}
6317		vtag_flushpage(va, (uint64_t)sfmmup);
6318		va += sz;
6319	}
6320}
6321
6322/*
6323 * Synchronize all the mappings in the range [addr..addr+len).
6324 * Can be called with clearflag having two states:
6325 * HAT_SYNC_DONTZERO means just return the rm stats
6326 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6327 */
6328void
6329hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6330{
6331	struct hmehash_bucket *hmebp;
6332	hmeblk_tag hblktag;
6333	int hmeshift, hashno = 1;
6334	struct hme_blk *hmeblkp, *list = NULL;
6335	caddr_t endaddr;
6336	cpuset_t cpuset;
6337
6338	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
6339	ASSERT((sfmmup == ksfmmup) ||
6340	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
6341	ASSERT((len & MMU_PAGEOFFSET) == 0);
6342	ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6343	    (clearflag == HAT_SYNC_ZERORM));
6344
6345	CPUSET_ZERO(cpuset);
6346
6347	endaddr = addr + len;
6348	hblktag.htag_id = sfmmup;
6349	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6350
6351	/*
6352	 * Spitfire supports 4 page sizes.
6353	 * Most pages are expected to be of the smallest page
6354	 * size (8K) and these will not need to be rehashed. 64K
6355 * pages also don't need to be rehashed because an hmeblk
6356 * spans 64K of address space. 512K pages might need 1 rehash
6357 * and 4M pages 2 rehashes.
6358	 */
6359	while (addr < endaddr) {
6360		hmeshift = HME_HASH_SHIFT(hashno);
6361		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6362		hblktag.htag_rehash = hashno;
6363		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6364
6365		SFMMU_HASH_LOCK(hmebp);
6366
6367		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6368		if (hmeblkp != NULL) {
6369			ASSERT(!hmeblkp->hblk_shared);
6370			/*
6371			 * We've encountered a shadow hmeblk so skip the range
6372			 * of the next smaller mapping size.
6373			 */
6374			if (hmeblkp->hblk_shw_bit) {
6375				ASSERT(sfmmup != ksfmmup);
6376				ASSERT(hashno > 1);
6377				addr = (caddr_t)P2END((uintptr_t)addr,
6378				    TTEBYTES(hashno - 1));
6379			} else {
6380				addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6381				    addr, endaddr, clearflag);
6382			}
6383			SFMMU_HASH_UNLOCK(hmebp);
6384			hashno = 1;
6385			continue;
6386		}
6387		SFMMU_HASH_UNLOCK(hmebp);
6388
6389		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6390			/*
6391			 * We have traversed the whole list and rehashed
6392			 * if necessary without finding the address to sync.
6393			 * This is ok, so we increment the address by the
6394			 * smallest hmeblk range for kernel mappings, or by the
6395			 * largest hmeblk range (to account for shadow hmeblks)
6396			 * for user mappings, and continue.
6397			 */
6398			if (sfmmup == ksfmmup)
6399				addr = (caddr_t)P2END((uintptr_t)addr,
6400				    TTEBYTES(1));
6401			else
6402				addr = (caddr_t)P2END((uintptr_t)addr,
6403				    TTEBYTES(hashno));
6404			hashno = 1;
6405		} else {
6406			hashno++;
6407		}
6408	}
6409	sfmmu_hblks_list_purge(&list, 0);
6410	cpuset = sfmmup->sfmmu_cpusran;
6411	xt_sync(cpuset);
6412}
6413
6414static caddr_t
6415sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6416	caddr_t endaddr, int clearflag)
6417{
6418	tte_t	tte, ttemod;
6419	struct sf_hment *sfhmep;
6420	int ttesz;
6421	struct page *pp;
6422	kmutex_t *pml;
6423	int ret;
6424
6425	ASSERT(hmeblkp->hblk_shw_bit == 0);
6426	ASSERT(!hmeblkp->hblk_shared);
6427
6428	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6429
6430	ttesz = get_hblk_ttesz(hmeblkp);
6431	HBLKTOHME(sfhmep, hmeblkp, addr);
6432
6433	while (addr < endaddr) {
6434		sfmmu_copytte(&sfhmep->hme_tte, &tte);
6435		if (TTE_IS_VALID(&tte)) {
6436			pml = NULL;
6437			pp = sfhmep->hme_page;
6438			if (pp) {
6439				pml = sfmmu_mlist_enter(pp);
6440			}
6441			if (pp != sfhmep->hme_page) {
6442				/*
6443				 * tte must have been unloaded
6444				 * underneath us.  Recheck
6445				 */
6446				ASSERT(pml);
6447				sfmmu_mlist_exit(pml);
6448				continue;
6449			}
6450
6451			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6452
6453			if (clearflag == HAT_SYNC_ZERORM) {
6454				ttemod = tte;
6455				TTE_CLR_RM(&ttemod);
6456				ret = sfmmu_modifytte_try(&tte, &ttemod,
6457				    &sfhmep->hme_tte);
6458				if (ret < 0) {
6459					if (pml) {
6460						sfmmu_mlist_exit(pml);
6461					}
6462					continue;
6463				}
6464
6465				if (ret > 0) {
6466					sfmmu_tlb_demap(addr, sfmmup,
6467					    hmeblkp, 0, 0);
6468				}
6469			}
6470			sfmmu_ttesync(sfmmup, addr, &tte, pp);
6471			if (pml) {
6472				sfmmu_mlist_exit(pml);
6473			}
6474		}
6475		addr += TTEBYTES(ttesz);
6476		sfhmep++;
6477	}
6478	return (addr);
6479}
6480
6481/*
6482 * This function will sync a tte to the page struct and it will
6483 * update the hat stats. Currently it allows us to pass a NULL pp
6484 * and we will simply update the stats.  We may want to change this
6485 * so we only keep stats for pages backed by pp's.
6486 */
6487static void
6488sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6489{
6490	uint_t rm = 0;
6491	int   	sz;
6492	pgcnt_t	npgs;
6493
6494	ASSERT(TTE_IS_VALID(ttep));
6495
6496	if (TTE_IS_NOSYNC(ttep)) {
6497		return;
6498	}
6499
6500	if (TTE_IS_REF(ttep))  {
6501		rm = P_REF;
6502	}
6503	if (TTE_IS_MOD(ttep))  {
6504		rm |= P_MOD;
6505	}
6506
6507	if (rm == 0) {
6508		return;
6509	}
6510
6511	sz = TTE_CSZ(ttep);
6512	if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6513		int i;
6514		caddr_t	vaddr = addr;
6515
6516		for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6517			hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6518		}
6519
6520	}
6521
6522	/*
6523	 * XXX I want to use cas to update nrm bits but they
6524	 * currently belong in common/vm and not in hat where
6525	 * they should be.
6526	 * The nrm bits are protected by the same mutex as
6527	 * the one that protects the page's mapping list.
6528	 */
6529	if (!pp)
6530		return;
6531	ASSERT(sfmmu_mlist_held(pp));
6532	/*
6533	 * If the tte is for a large page, we need to sync all the
6534	 * pages covered by the tte.
6535	 */
6536	if (sz != TTE8K) {
6537		ASSERT(pp->p_szc != 0);
6538		pp = PP_GROUPLEADER(pp, sz);
6539		ASSERT(sfmmu_mlist_held(pp));
6540	}
6541
6542	/* Get number of pages from tte size. */
6543	npgs = TTEPAGES(sz);
6544
6545	do {
6546		ASSERT(pp);
6547		ASSERT(sfmmu_mlist_held(pp));
6548		if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6549		    ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6550			hat_page_setattr(pp, rm);
6551
6552		/*
6553		 * Are we done? If not, we must have a large mapping.
6554		 * For large mappings we need to sync the rest of the pages
6555		 * covered by this tte; goto the next page.
6556		 */
6557	} while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6558}
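
/*
 * Worked example for the constituent-page walk above: a TTE of size
 * class sz covers TTEPAGES(sz) 8K pages, so a 4M TTE syncs
 * 4M / 8K == 512 constituent page_t's, starting from the group leader
 * returned by PP_GROUPLEADER().
 */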
6559
6560/*
6561 * Execute pre-callback handler of each pa_hment linked to pp
6562 *
6563 * Inputs:
6564 *   flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6565 *   capture_cpus: pointer to return value (below)
6566 *
6567 * Returns:
6568 *   Propagates the subsystem callback return values back to the caller;
6569 *   returns 0 on success.  If capture_cpus is non-NULL, the value returned
6570 *   is zero if all of the pa_hments are of a type that do not require
6571 *   capturing CPUs prior to suspending the mapping, else it is 1.
6572 */
6573static int
6574hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6575{
6576	struct sf_hment	*sfhmep;
6577	struct pa_hment *pahmep;
6578	int (*f)(caddr_t, uint_t, uint_t, void *);
6579	int		ret;
6580	id_t		id;
6581	int		locked = 0;
6582	kmutex_t	*pml;
6583
6584	ASSERT(PAGE_EXCL(pp));
6585	if (!sfmmu_mlist_held(pp)) {
6586		pml = sfmmu_mlist_enter(pp);
6587		locked = 1;
6588	}
6589
6590	if (capture_cpus)
6591		*capture_cpus = 0;
6592
6593top:
6594	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6595		/*
6596		 * skip sf_hments corresponding to VA<->PA mappings;
6597		 * for pa_hment's, hme_tte.ll is zero
6598		 */
6599		if (!IS_PAHME(sfhmep))
6600			continue;
6601
6602		pahmep = sfhmep->hme_data;
6603		ASSERT(pahmep != NULL);
6604
6605		/*
6606		 * skip if pre-handler has been called earlier in this loop
6607		 */
6608		if (pahmep->flags & flag)
6609			continue;
6610
6611		id = pahmep->cb_id;
6612		ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6613		if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6614			*capture_cpus = 1;
6615		if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6616			pahmep->flags |= flag;
6617			continue;
6618		}
6619
6620		/*
6621		 * Drop the mapping list lock to avoid locking order issues.
6622		 */
6623		if (locked)
6624			sfmmu_mlist_exit(pml);
6625
6626		ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6627		if (ret != 0)
6628			return (ret);	/* caller must do the cleanup */
6629
6630		if (locked) {
6631			pml = sfmmu_mlist_enter(pp);
6632			pahmep->flags |= flag;
6633			goto top;
6634		}
6635
6636		pahmep->flags |= flag;
6637	}
6638
6639	if (locked)
6640		sfmmu_mlist_exit(pml);
6641
6642	return (0);
6643}
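
/*
 * Sketch of the registration table consulted above: each pa_hment
 * carries a callback id indexing a table whose entries hold the
 * pre/post/error handlers and the capture_cpus hint.  A hypothetical,
 * simplified entry layout:
 */
typedef struct example_cb_entry {
	int	(*prehandler)(caddr_t, uint_t, uint_t, void *);
	int	(*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t);
	int	(*errhandler)(caddr_t, uint_t, uint_t, void *);
	int	capture_cpus;
} example_cb_entry_t;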
6644
6645/*
6646 * Execute post-callback handler of each pa_hment linked to pp
6647 *
6648 * Same overall assumptions and restrictions apply as for
6649 * hat_pageprocess_precallbacks().
6650 */
6651static void
6652hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6653{
6654	pfn_t pgpfn = pp->p_pagenum;
6655	pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6656	pfn_t newpfn;
6657	struct sf_hment *sfhmep;
6658	struct pa_hment *pahmep;
6659	int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6660	id_t	id;
6661	int	locked = 0;
6662	kmutex_t *pml;
6663
6664	ASSERT(PAGE_EXCL(pp));
6665	if (!sfmmu_mlist_held(pp)) {
6666		pml = sfmmu_mlist_enter(pp);
6667		locked = 1;
6668	}
6669
6670top:
6671	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6672		/*
6673		 * skip sf_hments corresponding to VA<->PA mappings;
6674		 * for pa_hment's, hme_tte.ll is zero
6675		 */
6676		if (!IS_PAHME(sfhmep))
6677			continue;
6678
6679		pahmep = sfhmep->hme_data;
6680		ASSERT(pahmep != NULL);
6681
6682		if ((pahmep->flags & flag) == 0)
6683			continue;
6684
6685		pahmep->flags &= ~flag;
6686
6687		id = pahmep->cb_id;
6688		ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6689		if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6690			continue;
6691
6692		/*
6693		 * Convert the base page PFN into the constituent PFN
6694		 * which is needed by the callback handler.
6695		 */
6696		newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6697
6698		/*
6699		 * Drop the mapping list lock to avoid locking order issues.
6700		 */
6701		if (locked)
6702			sfmmu_mlist_exit(pml);
6703
6704		if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6705		    != 0)
6706			panic("sfmmu: posthandler failed");
6707
6708		if (locked) {
6709			pml = sfmmu_mlist_enter(pp);
6710			goto top;
6711		}
6712	}
6713
6714	if (locked)
6715		sfmmu_mlist_exit(pml);
6716}
6717
6718/*
6719 * Suspend locked kernel mapping
6720 */
6721void
6722hat_pagesuspend(struct page *pp)
6723{
6724	struct sf_hment *sfhmep;
6725	sfmmu_t *sfmmup;
6726	tte_t tte, ttemod;
6727	struct hme_blk *hmeblkp;
6728	caddr_t addr;
6729	int index, cons;
6730	cpuset_t cpuset;
6731
6732	ASSERT(PAGE_EXCL(pp));
6733	ASSERT(sfmmu_mlist_held(pp));
6734
6735	mutex_enter(&kpr_suspendlock);
6736
6737	/*
6738	 * We're about to suspend a kernel mapping so mark this thread as
6739	 * non-traceable by DTrace. This prevents us from running into issues
6740	 * with probe context trying to touch a suspended page
6741	 * in the relocation codepath itself.
6742	 */
6743	curthread->t_flag |= T_DONTDTRACE;
6744
6745	index = PP_MAPINDEX(pp);
6746	cons = TTE8K;
6747
6748retry:
6749	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6750
6751		if (IS_PAHME(sfhmep))
6752			continue;
6753
6754		if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6755			continue;
6756
6757		/*
6758		 * Loop until we successfully set the suspend bit in
6759		 * the TTE.
6760		 */
6761again:
6762		sfmmu_copytte(&sfhmep->hme_tte, &tte);
6763		ASSERT(TTE_IS_VALID(&tte));
6764
6765		ttemod = tte;
6766		TTE_SET_SUSPEND(&ttemod);
6767		if (sfmmu_modifytte_try(&tte, &ttemod,
6768		    &sfhmep->hme_tte) < 0)
6769			goto again;
6770
6771		/*
6772		 * Invalidate TSB entry
6773		 */
6774		hmeblkp = sfmmu_hmetohblk(sfhmep);
6775
6776		sfmmup = hblktosfmmu(hmeblkp);
6777		ASSERT(sfmmup == ksfmmup);
6778		ASSERT(!hmeblkp->hblk_shared);
6779
6780		addr = tte_to_vaddr(hmeblkp, tte);
6781
6782		/*
6783		 * No need to make sure that the TSB for this sfmmu is
6784		 * not being relocated since it is ksfmmup and thus it
6785		 * will never be relocated.
6786		 */
6787		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6788
6789		/*
6790		 * Update xcall stats
6791		 */
6792		cpuset = cpu_ready_set;
6793		CPUSET_DEL(cpuset, CPU->cpu_id);
6794
6795		/* LINTED: constant in conditional context */
6796		SFMMU_XCALL_STATS(ksfmmup);
6797
6798		/*
6799		 * Flush TLB entry on remote CPUs
6800		 */
6801		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6802		    (uint64_t)ksfmmup);
6803		xt_sync(cpuset);
6804
6805		/*
6806		 * Flush TLB entry on local CPU
6807		 */
6808		vtag_flushpage(addr, (uint64_t)ksfmmup);
6809	}
6810
6811	while (index != 0) {
6812		index = index >> 1;
6813		if (index != 0)
6814			cons++;
6815		if (index & 0x1) {
6816			pp = PP_GROUPLEADER(pp, cons);
6817			goto retry;
6818		}
6819	}
6820}
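
/*
 * Worked example for the index walk above (hat_pageunload() uses the
 * same loop), assuming bit i of the mapping index denotes size class i
 * with bit 0 being 8K: for index == 0x4 (a 512K mapping) the loop
 * shifts twice, advancing cons from TTE8K to TTE512K, and then retries
 * the scan on that size's group leader.
 */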
6821
6822#ifdef	DEBUG
6823
6824#define	N_PRLE	1024
6825struct prle {
6826	page_t *targ;
6827	page_t *repl;
6828	int status;
6829	int pausecpus;
6830	hrtime_t whence;
6831};
6832
6833static struct prle page_relocate_log[N_PRLE];
6834static int prl_entry;
6835static kmutex_t prl_mutex;
6836
6837#define	PAGE_RELOCATE_LOG(t, r, s, p)					\
6838	mutex_enter(&prl_mutex);					\
6839	page_relocate_log[prl_entry].targ = *(t);			\
6840	page_relocate_log[prl_entry].repl = *(r);			\
6841	page_relocate_log[prl_entry].status = (s);			\
6842	page_relocate_log[prl_entry].pausecpus = (p);			\
6843	page_relocate_log[prl_entry].whence = gethrtime();		\
6844	prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1;	\
6845	mutex_exit(&prl_mutex);
6846
6847#else	/* !DEBUG */
6848#define	PAGE_RELOCATE_LOG(t, r, s, p)
6849#endif
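
/*
 * Side note on the macro above: PAGE_RELOCATE_LOG() expands to several
 * statements, so it is only safe where a compound statement is.  The
 * conventional way to make such a macro act as a single statement in
 * every context (e.g. an unbraced if/else) is the do/while (0) idiom,
 * sketched here under a hypothetical name:
 */
#ifdef	DEBUG
#define	EXAMPLE_RELOCATE_LOG(t, r, s, p)				\
	do {								\
		mutex_enter(&prl_mutex);				\
		page_relocate_log[prl_entry].targ = *(t);		\
		page_relocate_log[prl_entry].repl = *(r);		\
		page_relocate_log[prl_entry].status = (s);		\
		page_relocate_log[prl_entry].pausecpus = (p);		\
		page_relocate_log[prl_entry].whence = gethrtime();	\
		prl_entry = (prl_entry + 1) % N_PRLE;			\
		mutex_exit(&prl_mutex);					\
	} while (0)
#endif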
6850
6851/*
6852 * Core Kernel Page Relocation Algorithm
6853 *
6854 * Input:
6855 *
6856 * target : 	constituent pages are SE_EXCL locked.
6857 * replacement:	constituent pages are SE_EXCL locked.
6858 *
6859 * Output:
6860 *
6861 * nrelocp:	number of pages relocated
6862 */
6863int
6864hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6865{
6866	page_t		*targ, *repl;
6867	page_t		*tpp, *rpp;
6868	kmutex_t	*low, *high;
6869	spgcnt_t	npages, i;
6870	page_t		*pl = NULL;
6871	int		old_pil;
6872	cpuset_t	cpuset;
6873	int		cap_cpus;
6874	int		ret;
6875#ifdef VAC
6876	int		cflags = 0;
6877#endif
6878
6879	if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) {
6880		PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6881		return (EAGAIN);
6882	}
6883
6884	mutex_enter(&kpr_mutex);
6885	kreloc_thread = curthread;
6886
6887	targ = *target;
6888	repl = *replacement;
6889	ASSERT(repl != NULL);
6890	ASSERT(targ->p_szc == repl->p_szc);
6891
6892	npages = page_get_pagecnt(targ->p_szc);
6893
6894	/*
6895	 * unload VA<->PA mappings that are not locked
6896	 */
6897	tpp = targ;
6898	for (i = 0; i < npages; i++) {
6899		(void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6900		tpp++;
6901	}
6902
6903	/*
6904	 * Do "presuspend" callbacks, in a context from which we can still
6905	 * block as needed. Note that we don't hold the mapping list lock
6906	 * of "targ" at this point due to potential locking order issues;
6907	 * we assume that between the hat_pageunload() above and holding
6908	 * the SE_EXCL lock that the mapping list *cannot* change at this
6909	 * point.
6910	 */
6911	ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6912	if (ret != 0) {
6913		/*
6914		 * EIO translates to fatal error, for all others cleanup
6915		 * and return EAGAIN.
6916		 */
6917		ASSERT(ret != EIO);
6918		hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6919		PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6920		kreloc_thread = NULL;
6921		mutex_exit(&kpr_mutex);
6922		return (EAGAIN);
6923	}
6924
6925	/*
6926	 * acquire p_mapping list lock for both the target and replacement
6927	 * root pages.
6928	 *
6929	 * low and high refer to the need to grab the mlist locks in a
6930	 * specific order to prevent race conditions.  Thus the
6931	 * lower lock must be grabbed before the higher lock.
6932	 *
6933	 * This will block hat_unload's accessing p_mapping list.  Since
6934	 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6935	 * blocked.  Thus, no one else will be accessing the p_mapping list
6936	 * while we suspend and reload the locked mapping below.
6937	 */
6938	tpp = targ;
6939	rpp = repl;
6940	sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6941
6942	kpreempt_disable();
6943
6944	/*
6945	 * We raise our PIL to 13 so that we don't get captured by
6946	 * another CPU or pinned by an interrupt thread.  We can't go to
6947	 * PIL 14 since the nexus driver(s) may need to interrupt at
6948	 * that level in the case of IOMMU pseudo mappings.
6949	 */
6950	cpuset = cpu_ready_set;
6951	CPUSET_DEL(cpuset, CPU->cpu_id);
6952	if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6953		old_pil = splr(XCALL_PIL);
6954	} else {
6955		old_pil = -1;
6956		xc_attention(cpuset);
6957	}
6958	ASSERT(getpil() == XCALL_PIL);
6959
6960	/*
6961	 * Now do suspend callbacks. In the case of an IOMMU mapping
6962	 * this will suspend all DMA activity to the page while it is
6963	 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6964	 * may be captured at this point we should have acquired any needed
6965	 * locks in the presuspend callback.
6966	 */
6967	ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6968	if (ret != 0) {
6969		repl = targ;
6970		goto suspend_fail;
6971	}
6972
6973	/*
6974	 * Raise the PIL yet again, this time to block all high-level
6975	 * interrupts on this CPU. This is necessary to prevent an
6976	 * interrupt routine from pinning the thread which holds the
6977	 * mapping suspended and then touching the suspended page.
6978	 *
6979	 * Once the page is suspended we also need to be careful to
6980	 * avoid calling any functions which touch any seg_kmem memory
6981	 * since that memory may be backed by the very page we are
6982	 * relocating in here!
6983	 */
6984	hat_pagesuspend(targ);
6985
6986	/*
6987	 * Now that we are confident everybody has stopped using this page,
6988	 * copy the page contents.  Note we use a physical copy to prevent
6989	 * locking issues and to avoid fpRAS because we can't handle it in
6990	 * this context.
6991	 */
6992	for (i = 0; i < npages; i++, tpp++, rpp++) {
6993#ifdef VAC
6994		/*
6995		 * If the replacement has a different vcolor than
6996		 * the one being replaced, we need to handle VAC
6997		 * consistency for it just as if we were setting up
6998		 * a new mapping to it.
6999		 */
7000		if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
7001		    (tpp->p_vcolor != rpp->p_vcolor) &&
7002		    !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
7003			CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
7004			sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
7005			    rpp->p_pagenum);
7006		}
7007#endif
7008		/*
7009		 * Copy the contents of the page.
7010		 */
7011		ppcopy_kernel(tpp, rpp);
7012	}
7013
7014	tpp = targ;
7015	rpp = repl;
7016	for (i = 0; i < npages; i++, tpp++, rpp++) {
7017		/*
7018		 * Copy attributes.  VAC consistency was handled above,
7019		 * if required.
7020		 */
7021		rpp->p_nrm = tpp->p_nrm;
7022		tpp->p_nrm = 0;
7023		rpp->p_index = tpp->p_index;
7024		tpp->p_index = 0;
7025#ifdef VAC
7026		rpp->p_vcolor = tpp->p_vcolor;
7027#endif
7028	}
7029
7030	/*
7031	 * First, unsuspend the page, if we set the suspend bit, and transfer
7032	 * the mapping list from the target page to the replacement page.
7033	 * Next process postcallbacks; since pa_hment's are linked only to the
7034	 * p_mapping list of root page, we don't iterate over the constituent
7035	 * pages.
7036	 */
7037	hat_pagereload(targ, repl);
7038
7039suspend_fail:
7040	hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
7041
7042	/*
7043	 * Now lower our PIL and release any captured CPUs since we
7044	 * are out of the "danger zone".  After this it will again be
7045	 * safe to acquire adaptive mutex locks, or to drop them...
7046	 */
7047	if (old_pil != -1) {
7048		splx(old_pil);
7049	} else {
7050		xc_dismissed(cpuset);
7051	}
7052
7053	kpreempt_enable();
7054
7055	sfmmu_mlist_reloc_exit(low, high);
7056
7057	/*
7058	 * Postsuspend callbacks should drop any locks held across
7059	 * the suspend callbacks.  As before, we don't hold the mapping
7060	 * list lock at this point.  Our assumption is that the mapping
7061	 * list still can't change due to our holding SE_EXCL lock and
7062	 * there being no unlocked mappings left.  Hence the restriction
7063	 * on calling context to hat_delete_callback().
7064	 */
7065	hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
7066	if (ret != 0) {
7067		/*
7068		 * The second presuspend call failed: we got here through
7069		 * the suspend_fail label above.
7070		 */
7071		ASSERT(ret != EIO);
7072		PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
7073		kreloc_thread = NULL;
7074		mutex_exit(&kpr_mutex);
7075		return (EAGAIN);
7076	}
7077
7078	/*
7079	 * Now that we're out of the performance critical section we can
7080	 * take care of updating the hash table, since we still
7081	 * hold all the pages locked SE_EXCL at this point we
7082	 * needn't worry about things changing out from under us.
7083	 */
7084	tpp = targ;
7085	rpp = repl;
7086	for (i = 0; i < npages; i++, tpp++, rpp++) {
7087
7088		/*
7089		 * replace targ with replacement in page_hash table
7090		 */
7091		targ = tpp;
7092		page_relocate_hash(rpp, targ);
7093
7094		/*
7095		 * concatenate target; caller of platform_page_relocate()
7096		 * expects target to be concatenated after returning.
7097		 */
7098		ASSERT(targ->p_next == targ);
7099		ASSERT(targ->p_prev == targ);
7100		page_list_concat(&pl, &targ);
7101	}
7102
7103	ASSERT(*target == pl);
7104	*nrelocp = npages;
7105	PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
7106	kreloc_thread = NULL;
7107	mutex_exit(&kpr_mutex);
7108	return (0);
7109}
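
/*
 * Sketch of the lock-ordering discipline behind
 * sfmmu_mlist_reloc_enter()/sfmmu_mlist_reloc_exit() above: when two
 * mutexes must be held at once, acquire them in a fixed (here,
 * address) order so that two threads locking the same pair in
 * opposite roles cannot deadlock.  The function name is hypothetical.
 */
static void
example_lock_pair(kmutex_t *a, kmutex_t *b, kmutex_t **lowp,
    kmutex_t **highp)
{
	*lowp = (a < b) ? a : b;	/* lower address first */
	*highp = (a < b) ? b : a;
	mutex_enter(*lowp);
	if (*highp != *lowp)
		mutex_enter(*highp);
}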
7110
7111/*
7112 * Called when stray pa_hments are found attached to a page which is
7113 * being freed.  Notify the subsystem which attached the pa_hment of
7114 * the error if it registered a suitable handler, else panic.
7115 */
7116static void
7117sfmmu_pahment_leaked(struct pa_hment *pahmep)
7118{
7119	id_t cb_id = pahmep->cb_id;
7120
7121	ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
7122	if (sfmmu_cb_table[cb_id].errhandler != NULL) {
7123		if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
7124		    HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
7125			return;		/* non-fatal */
7126	}
7127	panic("pa_hment leaked: 0x%p", (void *)pahmep);
7128}
7129
7130/*
7131 * Remove all mappings to page 'pp'.
7132 */
7133int
7134hat_pageunload(struct page *pp, uint_t forceflag)
7135{
7136	struct page *origpp = pp;
7137	struct sf_hment *sfhme, *tmphme;
7138	struct hme_blk *hmeblkp;
7139	kmutex_t *pml;
7140#ifdef VAC
7141	kmutex_t *pmtx;
7142#endif
7143	cpuset_t cpuset, tset;
7144	int index, cons;
7145	int xhme_blks;
7146	int pa_hments;
7147
7148	ASSERT(PAGE_EXCL(pp));
7149
7150retry_xhat:
7151	tmphme = NULL;
7152	xhme_blks = 0;
7153	pa_hments = 0;
7154	CPUSET_ZERO(cpuset);
7155
7156	pml = sfmmu_mlist_enter(pp);
7157
7158#ifdef VAC
7159	if (pp->p_kpmref)
7160		sfmmu_kpm_pageunload(pp);
7161	ASSERT(!PP_ISMAPPED_KPM(pp));
7162#endif
7163	/*
7164	 * Clear vpm reference. Since the page is exclusively locked
7165	 * vpm cannot be referencing it.
7166	 */
7167	if (vpm_enable) {
7168		pp->p_vpmref = 0;
7169	}
7170
7171	index = PP_MAPINDEX(pp);
7172	cons = TTE8K;
7173retry:
7174	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7175		tmphme = sfhme->hme_next;
7176
7177		if (IS_PAHME(sfhme)) {
7178			ASSERT(sfhme->hme_data != NULL);
7179			pa_hments++;
7180			continue;
7181		}
7182
7183		hmeblkp = sfmmu_hmetohblk(sfhme);
7184		if (hmeblkp->hblk_xhat_bit) {
7185			struct xhat_hme_blk *xblk =
7186			    (struct xhat_hme_blk *)hmeblkp;
7187
7188			(void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
7189			    pp, forceflag, XBLK2PROVBLK(xblk));
7190
7191			xhme_blks = 1;
7192			continue;
7193		}
7194
7195		/*
7196		 * If there are kernel mappings don't unload them, they will
7197		 * be suspended.
7198		 */
7199		if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7200		    hmeblkp->hblk_tag.htag_id == ksfmmup)
7201			continue;
7202
7203		tset = sfmmu_pageunload(pp, sfhme, cons);
7204		CPUSET_OR(cpuset, tset);
7205	}
7206
7207	while (index != 0) {
7208		index = index >> 1;
7209		if (index != 0)
7210			cons++;
7211		if (index & 0x1) {
7212			/* Go to leading page */
7213			pp = PP_GROUPLEADER(pp, cons);
7214			ASSERT(sfmmu_mlist_held(pp));
7215			goto retry;
7216		}
7217	}
7218
7219	/*
7220	 * cpuset may be empty if the page was only mapped by segkpm,
7221	 * in which case we won't actually cross-trap.
7222	 */
7223	xt_sync(cpuset);
7224
7225	/*
7226	 * The page should have no mappings at this point, unless
7227	 * we were called from hat_page_relocate() in which case we
7228	 * leave the locked mappings which will be suspended later.
7229	 */
7230	ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
7231	    (forceflag == SFMMU_KERNEL_RELOC));
7232
7233#ifdef VAC
7234	if (PP_ISTNC(pp)) {
7235		if (cons == TTE8K) {
7236			pmtx = sfmmu_page_enter(pp);
7237			PP_CLRTNC(pp);
7238			sfmmu_page_exit(pmtx);
7239		} else {
7240			conv_tnc(pp, cons);
7241		}
7242	}
7243#endif	/* VAC */
7244
7245	if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
7246		/*
7247		 * Unlink any pa_hments and free them, calling back
7248		 * the responsible subsystem to notify it of the error.
7249		 * This can occur in situations such as drivers leaking
7250		 * DMA handles: naughty, but common enough that we'd like
7251		 * to keep the system running rather than bringing it
7252		 * down with an obscure error like "pa_hment leaked"
7253		 * which doesn't aid the user in debugging their driver.
7254		 */
7255		for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7256			tmphme = sfhme->hme_next;
7257			if (IS_PAHME(sfhme)) {
7258				struct pa_hment *pahmep = sfhme->hme_data;
7259				sfmmu_pahment_leaked(pahmep);
7260				HME_SUB(sfhme, pp);
7261				kmem_cache_free(pa_hment_cache, pahmep);
7262			}
7263		}
7264
7265		ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
7266	}
7267
7268	sfmmu_mlist_exit(pml);
7269
7270	/*
7271	 * XHAT may not have finished unloading pages
7272	 * because some other thread was waiting for
7273	 * mlist lock and XHAT_PAGEUNLOAD let it do
7274	 * the job.
7275	 */
7276	if (xhme_blks) {
7277		pp = origpp;
7278		goto retry_xhat;
7279	}
7280
7281	return (0);
7282}
7283
7284cpuset_t
7285sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7286{
7287	struct hme_blk *hmeblkp;
7288	sfmmu_t *sfmmup;
7289	tte_t tte, ttemod;
7290#ifdef DEBUG
7291	tte_t orig_old;
7292#endif /* DEBUG */
7293	caddr_t addr;
7294	int ttesz;
7295	int ret;
7296	cpuset_t cpuset;
7297
7298	ASSERT(pp != NULL);
7299	ASSERT(sfmmu_mlist_held(pp));
7300	ASSERT(!PP_ISKAS(pp));
7301
7302	CPUSET_ZERO(cpuset);
7303
7304	hmeblkp = sfmmu_hmetohblk(sfhme);
7305
7306readtte:
7307	sfmmu_copytte(&sfhme->hme_tte, &tte);
7308	if (TTE_IS_VALID(&tte)) {
7309		sfmmup = hblktosfmmu(hmeblkp);
7310		ttesz = get_hblk_ttesz(hmeblkp);
7311		/*
7312		 * Only unload mappings of 'cons' size.
7313		 */
7314		if (ttesz != cons)
7315			return (cpuset);
7316
7317		/*
7318		 * Note that we have p_mapping lock, but no hash lock here.
7319		 * hblk_unload() has to have both hash lock AND p_mapping
7320		 * lock before it tries to modify tte. So, the tte could
7321		 * not become invalid in the sfmmu_modifytte_try() below.
7322		 */
7323		ttemod = tte;
7324#ifdef DEBUG
7325		orig_old = tte;
7326#endif /* DEBUG */
7327
7328		TTE_SET_INVALID(&ttemod);
7329		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7330		if (ret < 0) {
7331#ifdef DEBUG
7332			/* only R/M bits can change. */
7333			chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7334#endif /* DEBUG */
7335			goto readtte;
7336		}
7337
7338		if (ret == 0) {
7339			panic("pageunload: cas failed?");
7340		}
7341
7342		addr = tte_to_vaddr(hmeblkp, tte);
7343
7344		if (hmeblkp->hblk_shared) {
7345			sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7346			uint_t rid = hmeblkp->hblk_tag.htag_rid;
7347			sf_region_t *rgnp;
7348			ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7349			ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7350			ASSERT(srdp != NULL);
7351			rgnp = srdp->srd_hmergnp[rid];
7352			SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7353			cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7354			sfmmu_ttesync(NULL, addr, &tte, pp);
7355			ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7356			atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1);
7357		} else {
7358			sfmmu_ttesync(sfmmup, addr, &tte, pp);
7359			atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1);
7360
7361			/*
7362			 * We need to flush the page from the virtual cache
7363			 * in order to prevent a virtual cache alias
7364			 * inconsistency. The particular scenario we need
7365			 * to worry about is:
			 * Given:  va1 and va2 are two virtual addresses that
			 * alias and will map the same physical address.
			 * 1.   mapping exists from va1 to pa and data has
			 *	been read into the cache.
			 * 2.   unload va1.
			 * 3.   load va2 and modify data using va2.
			 * 4.   unload va2.
7373			 * 5.   load va1 and reference data.  Unless we flush
7374			 *	the data cache when we unload we will get
7375			 *	stale data.
7376			 * This scenario is taken care of by using virtual
7377			 * page coloring.
7378			 */
7379			if (sfmmup->sfmmu_ismhat) {
7380				/*
7381				 * Flush TSBs, TLBs and caches
7382				 * of every process
7383				 * sharing this ism segment.
7384				 */
7385				sfmmu_hat_lock_all();
7386				mutex_enter(&ism_mlist_lock);
7387				kpreempt_disable();
7388				sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7389				    pp->p_pagenum, CACHE_NO_FLUSH);
7390				kpreempt_enable();
7391				mutex_exit(&ism_mlist_lock);
7392				sfmmu_hat_unlock_all();
7393				cpuset = cpu_ready_set;
7394			} else {
7395				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7396				cpuset = sfmmup->sfmmu_cpusran;
7397			}
7398		}
7399
7400		/*
7401		 * Hme_sub has to run after ttesync() and a_rss update.
7402		 * See hblk_unload().
7403		 */
7404		HME_SUB(sfhme, pp);
7405		membar_stst();
7406
		/*
		 * We cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
		 * since pteload may have done a HME_ADD() right after
		 * we did the HME_SUB() above. Hmecnt is now maintained
		 * by cas only; no lock guarantees its value. The only
		 * guarantee we have is that hmecnt never drops below
		 * what it should be, so the hblk will not be taken away.
		 * It is also important that we decrement hmecnt only
		 * after we are done with hmeblkp, so that this hmeblk
		 * won't be stolen.
		 */
7418		ASSERT(hmeblkp->hblk_hmecnt > 0);
7419		ASSERT(hmeblkp->hblk_vcnt > 0);
7420		atomic_add_16(&hmeblkp->hblk_vcnt, -1);
7421		atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
7422		/*
7423		 * This is bug 4063182.
7424		 * XXX: fixme
7425		 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7426		 *	!hmeblkp->hblk_lckcnt);
7427		 */
7428	} else {
7429		panic("invalid tte? pp %p &tte %p",
7430		    (void *)pp, (void *)&tte);
7431	}
7432
7433	return (cpuset);
7434}
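
/*
 * A note on the lock-free TTE update protocol used above (and again in
 * sfmmu_pagesync() and sfmmu_pageclrwrt() below): a TTE is changed by
 * snapshotting it, modifying the copy, and compare-and-swapping until
 * no concurrent update slips in between.  A minimal sketch using this
 * file's primitives:
 *
 *	tte_t old, new;
 *
 *	do {
 *		sfmmu_copytte(&sfhme->hme_tte, &old);
 *		new = old;
 *		TTE_SET_INVALID(&new);
 *	} while (sfmmu_modifytte_try(&old, &new, &sfhme->hme_tte) < 0);
 *
 * A negative return means the cas lost a benign race (e.g. hardware
 * setting the ref/mod bits) and the caller must re-read and retry.
 * sfmmu_pageunload() additionally panics on a return of 0 because,
 * with the p_mapping lock held, the tte may change but can never
 * become invalid underneath it.
 */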
7435
7436/*
7437 * While relocating a kernel page, this function will move the mappings
7438 * from tpp to dpp and modify any associated data with these mappings.
7439 * It also unsuspends the suspended kernel mapping.
7440 */
7441static void
7442hat_pagereload(struct page *tpp, struct page *dpp)
7443{
7444	struct sf_hment *sfhme;
7445	tte_t tte, ttemod;
7446	int index, cons;
7447
7448	ASSERT(getpil() == PIL_MAX);
7449	ASSERT(sfmmu_mlist_held(tpp));
7450	ASSERT(sfmmu_mlist_held(dpp));
7451
7452	index = PP_MAPINDEX(tpp);
7453	cons = TTE8K;
7454
7455	/* Update real mappings to the page */
7456retry:
7457	for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7458		if (IS_PAHME(sfhme))
7459			continue;
7460		sfmmu_copytte(&sfhme->hme_tte, &tte);
7461		ttemod = tte;
7462
7463		/*
7464		 * replace old pfn with new pfn in TTE
7465		 */
7466		PFN_TO_TTE(ttemod, dpp->p_pagenum);
7467
7468		/*
7469		 * clear suspend bit
7470		 */
7471		ASSERT(TTE_IS_SUSPEND(&ttemod));
7472		TTE_CLR_SUSPEND(&ttemod);
7473
7474		if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7475			panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7476
7477		/*
7478		 * set hme_page point to new page
7479		 */
7480		sfhme->hme_page = dpp;
7481	}
7482
7483	/*
7484	 * move p_mapping list from old page to new page
7485	 */
7486	dpp->p_mapping = tpp->p_mapping;
7487	tpp->p_mapping = NULL;
7488	dpp->p_share = tpp->p_share;
7489	tpp->p_share = 0;
7490
7491	while (index != 0) {
7492		index = index >> 1;
7493		if (index != 0)
7494			cons++;
7495		if (index & 0x1) {
7496			tpp = PP_GROUPLEADER(tpp, cons);
7497			dpp = PP_GROUPLEADER(dpp, cons);
7498			goto retry;
7499		}
7500	}
7501
7502	curthread->t_flag &= ~T_DONTDTRACE;
7503	mutex_exit(&kpr_suspendlock);
7504}
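
/*
 * The index/cons walk at the end of hat_pagereload() (and in
 * hat_pagesync(), hat_page_clrwrt() and others below) decodes
 * PP_MAPINDEX(): each successive bit of the index, moving up from
 * TTE8K, records that the page takes part in a mapping of the next
 * larger tte size, anchored at that size's group-leader page.  A
 * sketch of the traversal, under that reading:
 *
 *	index = PP_MAPINDEX(pp);
 *	cons = TTE8K;
 *	while (index >>= 1) {
 *		cons++;
 *		if (index & 0x1)
 *			visit(PP_GROUPLEADER(pp, cons));
 *	}
 *
 * where visit() stands for whatever per-size work the caller performs
 * on the leader's mapping list.
 */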
7505
7506uint_t
7507hat_pagesync(struct page *pp, uint_t clearflag)
7508{
7509	struct sf_hment *sfhme, *tmphme = NULL;
7510	struct hme_blk *hmeblkp;
7511	kmutex_t *pml;
7512	cpuset_t cpuset, tset;
7513	int	index, cons;
7514	extern	ulong_t po_share;
7515	page_t	*save_pp = pp;
7516	int	stop_on_sh = 0;
7517	uint_t	shcnt;
7518
7519	CPUSET_ZERO(cpuset);
7520
7521	if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7522		return (PP_GENERIC_ATTR(pp));
7523	}
7524
7525	if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7526		if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7527			return (PP_GENERIC_ATTR(pp));
7528		}
7529		if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7530			return (PP_GENERIC_ATTR(pp));
7531		}
7532		if (clearflag & HAT_SYNC_STOPON_SHARED) {
7533			if (pp->p_share > po_share) {
7534				hat_page_setattr(pp, P_REF);
7535				return (PP_GENERIC_ATTR(pp));
7536			}
7537			stop_on_sh = 1;
7538			shcnt = 0;
7539		}
7540	}
7541
7542	clearflag &= ~HAT_SYNC_STOPON_SHARED;
7543	pml = sfmmu_mlist_enter(pp);
7544	index = PP_MAPINDEX(pp);
7545	cons = TTE8K;
7546retry:
7547	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7548		/*
7549		 * We need to save the next hment on the list since
7550		 * it is possible for pagesync to remove an invalid hment
7551		 * from the list.
7552		 */
7553		tmphme = sfhme->hme_next;
7554		if (IS_PAHME(sfhme))
7555			continue;
7556		/*
7557		 * If we are looking for large mappings and this hme doesn't
7558		 * reach the range we are seeking, just ignore it.
7559		 */
7560		hmeblkp = sfmmu_hmetohblk(sfhme);
7561		if (hmeblkp->hblk_xhat_bit)
7562			continue;
7563
7564		if (hme_size(sfhme) < cons)
7565			continue;
7566
7567		if (stop_on_sh) {
7568			if (hmeblkp->hblk_shared) {
7569				sf_srd_t *srdp = hblktosrd(hmeblkp);
7570				uint_t rid = hmeblkp->hblk_tag.htag_rid;
7571				sf_region_t *rgnp;
7572				ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7573				ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7574				ASSERT(srdp != NULL);
7575				rgnp = srdp->srd_hmergnp[rid];
7576				SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7577				    rgnp, rid);
7578				shcnt += rgnp->rgn_refcnt;
7579			} else {
7580				shcnt++;
7581			}
7582			if (shcnt > po_share) {
7583				/*
7584				 * tell the pager to spare the page this time
7585				 * around.
7586				 */
7587				hat_page_setattr(save_pp, P_REF);
7588				index = 0;
7589				break;
7590			}
7591		}
7592		tset = sfmmu_pagesync(pp, sfhme,
7593		    clearflag & ~HAT_SYNC_STOPON_RM);
7594		CPUSET_OR(cpuset, tset);
7595
7596		/*
7597		 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7598		 * as the "ref" or "mod" is set or share cnt exceeds po_share.
7599		 */
7600		if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7601		    (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7602		    ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7603			index = 0;
7604			break;
7605		}
7606	}
7607
7608	while (index) {
7609		index = index >> 1;
7610		cons++;
7611		if (index & 0x1) {
7612			/* Go to leading page */
7613			pp = PP_GROUPLEADER(pp, cons);
7614			goto retry;
7615		}
7616	}
7617
7618	xt_sync(cpuset);
7619	sfmmu_mlist_exit(pml);
7620	return (PP_GENERIC_ATTR(save_pp));
7621}
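
/*
 * A hypothetical caller checking (without clearing) whether a page is
 * dirty would use the routine above roughly as:
 *
 *	if (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) &
 *	    P_MOD) {
 *		... page is modified in at least one mapping ...
 *	}
 *
 * HAT_SYNC_DONTZERO folds the hardware ref/mod bits into p_nrm without
 * clearing them in the TTEs, and the STOPON flag lets the scan stop as
 * soon as a modified mapping is found.
 */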
7622
7623/*
7624 * Get all the hardware dependent attributes for a page struct
7625 */
7626static cpuset_t
7627sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7628	uint_t clearflag)
7629{
7630	caddr_t addr;
7631	tte_t tte, ttemod;
7632	struct hme_blk *hmeblkp;
7633	int ret;
7634	sfmmu_t *sfmmup;
7635	cpuset_t cpuset;
7636
7637	ASSERT(pp != NULL);
7638	ASSERT(sfmmu_mlist_held(pp));
7639	ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7640	    (clearflag == HAT_SYNC_ZERORM));
7641
7642	SFMMU_STAT(sf_pagesync);
7643
7644	CPUSET_ZERO(cpuset);
7645
7646sfmmu_pagesync_retry:
7647
7648	sfmmu_copytte(&sfhme->hme_tte, &tte);
7649	if (TTE_IS_VALID(&tte)) {
7650		hmeblkp = sfmmu_hmetohblk(sfhme);
7651		sfmmup = hblktosfmmu(hmeblkp);
7652		addr = tte_to_vaddr(hmeblkp, tte);
7653		if (clearflag == HAT_SYNC_ZERORM) {
7654			ttemod = tte;
7655			TTE_CLR_RM(&ttemod);
7656			ret = sfmmu_modifytte_try(&tte, &ttemod,
7657			    &sfhme->hme_tte);
7658			if (ret < 0) {
7659				/*
7660				 * cas failed and the new value is not what
7661				 * we want.
7662				 */
7663				goto sfmmu_pagesync_retry;
7664			}
7665
7666			if (ret > 0) {
7667				/* we win the cas */
7668				if (hmeblkp->hblk_shared) {
7669					sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7670					uint_t rid =
7671					    hmeblkp->hblk_tag.htag_rid;
7672					sf_region_t *rgnp;
7673					ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7674					ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7675					ASSERT(srdp != NULL);
7676					rgnp = srdp->srd_hmergnp[rid];
7677					SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7678					    srdp, rgnp, rid);
7679					cpuset = sfmmu_rgntlb_demap(addr,
7680					    rgnp, hmeblkp, 1);
7681				} else {
7682					sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7683					    0, 0);
7684					cpuset = sfmmup->sfmmu_cpusran;
7685				}
7686			}
7687		}
7688		sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7689		    &tte, pp);
7690	}
7691	return (cpuset);
7692}
7693
/*
 * Remove write permission from a mapping to a page, so that
 * we can detect the next modification of it. This requires modifying
 * the TTE then invalidating (demapping) any TLB entry using that TTE.
 * This code is similar to sfmmu_pagesync().
 */
7700static cpuset_t
7701sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7702{
7703	caddr_t addr;
7704	tte_t tte;
7705	tte_t ttemod;
7706	struct hme_blk *hmeblkp;
7707	int ret;
7708	sfmmu_t *sfmmup;
7709	cpuset_t cpuset;
7710
7711	ASSERT(pp != NULL);
7712	ASSERT(sfmmu_mlist_held(pp));
7713
7714	CPUSET_ZERO(cpuset);
7715	SFMMU_STAT(sf_clrwrt);
7716
7717retry:
7718
7719	sfmmu_copytte(&sfhme->hme_tte, &tte);
7720	if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7721		hmeblkp = sfmmu_hmetohblk(sfhme);
7722
7723		/*
7724		 * xhat mappings should never be to a VMODSORT page.
7725		 */
7726		ASSERT(hmeblkp->hblk_xhat_bit == 0);
7727
7728		sfmmup = hblktosfmmu(hmeblkp);
7729		addr = tte_to_vaddr(hmeblkp, tte);
7730
7731		ttemod = tte;
7732		TTE_CLR_WRT(&ttemod);
7733		TTE_CLR_MOD(&ttemod);
7734		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7735
7736		/*
7737		 * if cas failed and the new value is not what
7738		 * we want retry
7739		 */
7740		if (ret < 0)
7741			goto retry;
7742
7743		/* we win the cas */
7744		if (ret > 0) {
7745			if (hmeblkp->hblk_shared) {
7746				sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7747				uint_t rid = hmeblkp->hblk_tag.htag_rid;
7748				sf_region_t *rgnp;
7749				ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7750				ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7751				ASSERT(srdp != NULL);
7752				rgnp = srdp->srd_hmergnp[rid];
7753				SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7754				    srdp, rgnp, rid);
7755				cpuset = sfmmu_rgntlb_demap(addr,
7756				    rgnp, hmeblkp, 1);
7757			} else {
7758				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7759				cpuset = sfmmup->sfmmu_cpusran;
7760			}
7761		}
7762	}
7763
7764	return (cpuset);
7765}
7766
7767/*
7768 * Walk all mappings of a page, removing write permission and clearing the
7769 * ref/mod bits. This code is similar to hat_pagesync()
7770 */
7771static void
7772hat_page_clrwrt(page_t *pp)
7773{
7774	struct sf_hment *sfhme;
7775	struct sf_hment *tmphme = NULL;
7776	kmutex_t *pml;
7777	cpuset_t cpuset;
7778	cpuset_t tset;
7779	int	index;
7780	int	 cons;
7781
7782	CPUSET_ZERO(cpuset);
7783
7784	pml = sfmmu_mlist_enter(pp);
7785	index = PP_MAPINDEX(pp);
7786	cons = TTE8K;
7787retry:
7788	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7789		tmphme = sfhme->hme_next;
7790
7791		/*
7792		 * If we are looking for large mappings and this hme doesn't
		 * reach the range we are seeking, just ignore it.
7794		 */
7795
7796		if (hme_size(sfhme) < cons)
7797			continue;
7798
7799		tset = sfmmu_pageclrwrt(pp, sfhme);
7800		CPUSET_OR(cpuset, tset);
7801	}
7802
7803	while (index) {
7804		index = index >> 1;
7805		cons++;
7806		if (index & 0x1) {
7807			/* Go to leading page */
7808			pp = PP_GROUPLEADER(pp, cons);
7809			goto retry;
7810		}
7811	}
7812
7813	xt_sync(cpuset);
7814	sfmmu_mlist_exit(pml);
7815}
7816
7817/*
7818 * Set the given REF/MOD/RO bits for the given page.
7819 * For a vnode with a sorted v_pages list, we need to change
7820 * the attributes and the v_pages list together under page_vnode_mutex.
7821 */
7822void
7823hat_page_setattr(page_t *pp, uint_t flag)
7824{
7825	vnode_t		*vp = pp->p_vnode;
7826	page_t		**listp;
7827	kmutex_t	*pmtx;
7828	kmutex_t	*vphm = NULL;
7829	int		noshuffle;
7830
7831	noshuffle = flag & P_NSH;
7832	flag &= ~P_NSH;
7833
7834	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7835
7836	/*
7837	 * nothing to do if attribute already set
7838	 */
7839	if ((pp->p_nrm & flag) == flag)
7840		return;
7841
7842	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7843	    !noshuffle) {
7844		vphm = page_vnode_mutex(vp);
7845		mutex_enter(vphm);
7846	}
7847
7848	pmtx = sfmmu_page_enter(pp);
7849	pp->p_nrm |= flag;
7850	sfmmu_page_exit(pmtx);
7851
7852	if (vphm != NULL) {
7853		/*
7854		 * Some File Systems examine v_pages for NULL w/o
7855		 * grabbing the vphm mutex. Must not let it become NULL when
7856		 * pp is the only page on the list.
7857		 */
7858		if (pp->p_vpnext != pp) {
7859			page_vpsub(&vp->v_pages, pp);
7860			if (vp->v_pages != NULL)
7861				listp = &vp->v_pages->p_vpprev->p_vpnext;
7862			else
7863				listp = &vp->v_pages;
7864			page_vpadd(listp, pp);
7865		}
7866		mutex_exit(vphm);
7867	}
7868}
7869
7870void
7871hat_page_clrattr(page_t *pp, uint_t flag)
7872{
7873	vnode_t		*vp = pp->p_vnode;
7874	kmutex_t	*pmtx;
7875
7876	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7877
7878	pmtx = sfmmu_page_enter(pp);
7879
7880	/*
7881	 * Caller is expected to hold page's io lock for VMODSORT to work
7882	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7883	 * bit is cleared.
	 * We don't assert this, to avoid tripping some existing third-party
	 * code. The dirty page is moved back to the top of the v_pages list
	 * after I/O is done in pvn_write_done().
7887	 */
7888	pp->p_nrm &= ~flag;
7889	sfmmu_page_exit(pmtx);
7890
7891	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7892
7893		/*
7894		 * VMODSORT works by removing write permissions and getting
7895		 * a fault when a page is made dirty. At this point
7896		 * we need to remove write permission from all mappings
7897		 * to this page.
7898		 */
7899		hat_page_clrwrt(pp);
7900	}
7901}
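
/*
 * Taken together, hat_page_clrwrt() and hat_page_clrattr() implement
 * the write-detection half of VMODSORT.  A rough timeline, assuming a
 * VMODSORT vnode:
 *
 *	hat_page_clrattr(pp, P_MOD);	clears mod bit in p_nrm and
 *					write-protects every mapping
 *	<store to the page>		takes a protection fault
 *	<fault handler reloads TTE>	page writable again, mod bit
 *					set, hat_page_setattr(pp, P_MOD)
 *					re-sorts pp on v_pages
 *
 * so the next modification of the page is always observed.
 */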
7902
7903uint_t
7904hat_page_getattr(page_t *pp, uint_t flag)
7905{
7906	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7907	return ((uint_t)(pp->p_nrm & flag));
7908}
7909
7910/*
7911 * DEBUG kernels: verify that a kernel va<->pa translation
7912 * is safe by checking the underlying page_t is in a page
7913 * relocation-safe state.
7914 */
7915#ifdef	DEBUG
7916void
7917sfmmu_check_kpfn(pfn_t pfn)
7918{
7919	page_t *pp;
7920	int index, cons;
7921
7922	if (hat_check_vtop == 0)
7923		return;
7924
7925	if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr)
7926		return;
7927
7928	pp = page_numtopp_nolock(pfn);
7929	if (!pp)
7930		return;
7931
7932	if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7933		return;
7934
7935	/*
7936	 * Handed a large kernel page, we dig up the root page since we
7937	 * know the root page might have the lock also.
7938	 */
7939	if (pp->p_szc != 0) {
7940		index = PP_MAPINDEX(pp);
7941		cons = TTE8K;
7942again:
7943		while (index != 0) {
7944			index >>= 1;
7945			if (index != 0)
7946				cons++;
7947			if (index & 0x1) {
7948				pp = PP_GROUPLEADER(pp, cons);
7949				goto again;
7950			}
7951		}
7952	}
7953
7954	if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7955		return;
7956
7957	/*
7958	 * Pages need to be locked or allocated "permanent" (either from
7959	 * static_arena arena or explicitly setting PG_NORELOC when calling
7960	 * page_create_va()) for VA->PA translations to be valid.
7961	 */
7962	if (!PP_ISNORELOC(pp))
7963		panic("Illegal VA->PA translation, pp 0x%p not permanent",
7964		    (void *)pp);
7965	else
7966		panic("Illegal VA->PA translation, pp 0x%p not locked",
7967		    (void *)pp);
7968}
7969#endif	/* DEBUG */
7970
7971/*
7972 * Returns a page frame number for a given virtual address.
7973 * Returns PFN_INVALID to indicate an invalid mapping
7974 */
7975pfn_t
7976hat_getpfnum(struct hat *hat, caddr_t addr)
7977{
7978	pfn_t pfn;
7979	tte_t tte;
7980
7981	/*
7982	 * We would like to
7983	 * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
7984	 * but we can't because the iommu driver will call this
7985	 * routine at interrupt time and it can't grab the as lock
7986	 * or it will deadlock: A thread could have the as lock
7987	 * and be waiting for io.  The io can't complete
7988	 * because the interrupt thread is blocked trying to grab
7989	 * the as lock.
7990	 */
7991
7992	ASSERT(hat->sfmmu_xhat_provider == NULL);
7993
7994	if (hat == ksfmmup) {
7995		if (IS_KMEM_VA_LARGEPAGE(addr)) {
7996			ASSERT(segkmem_lpszc > 0);
7997			pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7998			if (pfn != PFN_INVALID) {
7999				sfmmu_check_kpfn(pfn);
8000				return (pfn);
8001			}
8002		} else if (segkpm && IS_KPM_ADDR(addr)) {
8003			return (sfmmu_kpm_vatopfn(addr));
8004		}
8005		while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
8006		    == PFN_SUSPENDED) {
8007			sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
8008		}
8009		sfmmu_check_kpfn(pfn);
8010		return (pfn);
8011	} else {
8012		return (sfmmu_uvatopfn(addr, hat, NULL));
8013	}
8014}
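
/*
 * Typical usage sketch: a driver translating a kernel virtual address
 * calls the routine above through the kernel hat and must check for
 * failure, e.g.
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
 *
 *	if (pfn == PFN_INVALID) {
 *		... no valid mapping for addr ...
 *	}
 */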
8015
8016/*
8017 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
8018 * Use hat_getpfnum(kas.a_hat, ...) instead.
8019 *
8020 * We'd like to return PFN_INVALID if the mappings have underlying page_t's
8021 * but can't right now due to the fact that some software has grown to use
8022 * this interface incorrectly. So for now when the interface is misused,
8023 * return a warning to the user that in the future it won't work in the
8024 * way they're abusing it, and carry on (after disabling page relocation).
8025 */
8026pfn_t
8027hat_getkpfnum(caddr_t addr)
8028{
8029	pfn_t pfn;
8030	tte_t tte;
8031	int badcaller = 0;
8032	extern int segkmem_reloc;
8033
8034	if (segkpm && IS_KPM_ADDR(addr)) {
8035		badcaller = 1;
8036		pfn = sfmmu_kpm_vatopfn(addr);
8037	} else {
8038		while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
8039		    == PFN_SUSPENDED) {
8040			sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
8041		}
8042		badcaller = pf_is_memory(pfn);
8043	}
8044
8045	if (badcaller) {
8046		/*
8047		 * We can't return PFN_INVALID or the caller may panic
8048		 * or corrupt the system.  The only alternative is to
8049		 * disable page relocation at this point for all kernel
8050		 * memory.  This will impact any callers of page_relocate()
8051		 * such as FMA or DR.
8052		 *
8053		 * RFE: Add junk here to spit out an ereport so the sysadmin
8054		 * can be advised that he should upgrade his device driver
8055		 * so that this doesn't happen.
8056		 */
8057		hat_getkpfnum_badcall(caller());
8058		if (hat_kpr_enabled && segkmem_reloc) {
8059			hat_kpr_enabled = 0;
8060			segkmem_reloc = 0;
8061			cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED");
8062		}
8063	}
8064	return (pfn);
8065}
8066
8067/*
8068 * This routine will return both pfn and tte for the vaddr.
8069 */
8070static pfn_t
8071sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
8072{
8073	struct hmehash_bucket *hmebp;
8074	hmeblk_tag hblktag;
8075	int hmeshift, hashno = 1;
8076	struct hme_blk *hmeblkp = NULL;
8077	tte_t tte;
8078
8079	struct sf_hment *sfhmep;
8080	pfn_t pfn;
8081
8082	/* support for ISM */
8083	ism_map_t	*ism_map;
8084	ism_blk_t	*ism_blkp;
8085	int		i;
8086	sfmmu_t *ism_hatid = NULL;
8087	sfmmu_t *locked_hatid = NULL;
8088	sfmmu_t	*sv_sfmmup = sfmmup;
8089	caddr_t	sv_vaddr = vaddr;
8090	sf_srd_t *srdp;
8091
8092	if (ttep == NULL) {
8093		ttep = &tte;
8094	} else {
8095		ttep->ll = 0;
8096	}
8097
8098	ASSERT(sfmmup != ksfmmup);
8099	SFMMU_STAT(sf_user_vtop);
8100	/*
	 * Set ism_hatid if vaddr falls in an ISM segment.
8102	 */
8103	ism_blkp = sfmmup->sfmmu_iblk;
8104	if (ism_blkp != NULL) {
8105		sfmmu_ismhat_enter(sfmmup, 0);
8106		locked_hatid = sfmmup;
8107	}
8108	while (ism_blkp != NULL && ism_hatid == NULL) {
8109		ism_map = ism_blkp->iblk_maps;
		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8111			if (vaddr >= ism_start(ism_map[i]) &&
8112			    vaddr < ism_end(ism_map[i])) {
8113				sfmmup = ism_hatid = ism_map[i].imap_ismhat;
8114				vaddr = (caddr_t)(vaddr -
8115				    ism_start(ism_map[i]));
8116				break;
8117			}
8118		}
8119		ism_blkp = ism_blkp->iblk_next;
8120	}
8121	if (locked_hatid) {
8122		sfmmu_ismhat_exit(locked_hatid, 0);
8123	}
8124
8125	hblktag.htag_id = sfmmup;
8126	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
8127	do {
8128		hmeshift = HME_HASH_SHIFT(hashno);
8129		hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
8130		hblktag.htag_rehash = hashno;
8131		hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
8132
8133		SFMMU_HASH_LOCK(hmebp);
8134
8135		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
8136		if (hmeblkp != NULL) {
8137			ASSERT(!hmeblkp->hblk_shared);
8138			HBLKTOHME(sfhmep, hmeblkp, vaddr);
8139			sfmmu_copytte(&sfhmep->hme_tte, ttep);
8140			SFMMU_HASH_UNLOCK(hmebp);
8141			if (TTE_IS_VALID(ttep)) {
8142				pfn = TTE_TO_PFN(vaddr, ttep);
8143				return (pfn);
8144			}
8145			break;
8146		}
8147		SFMMU_HASH_UNLOCK(hmebp);
8148		hashno++;
8149	} while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
8150
8151	if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
8152		return (PFN_INVALID);
8153	}
8154	srdp = sv_sfmmup->sfmmu_srdp;
8155	ASSERT(srdp != NULL);
8156	ASSERT(srdp->srd_refcnt != 0);
8157	hblktag.htag_id = srdp;
8158	hashno = 1;
8159	do {
8160		hmeshift = HME_HASH_SHIFT(hashno);
8161		hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
8162		hblktag.htag_rehash = hashno;
8163		hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
8164
8165		SFMMU_HASH_LOCK(hmebp);
8166		for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
8167		    hmeblkp = hmeblkp->hblk_next) {
8168			uint_t rid;
8169			sf_region_t *rgnp;
8170			caddr_t rsaddr;
8171			caddr_t readdr;
8172
8173			if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
8174			    sv_sfmmup->sfmmu_hmeregion_map)) {
8175				continue;
8176			}
8177			ASSERT(hmeblkp->hblk_shared);
8178			rid = hmeblkp->hblk_tag.htag_rid;
8179			ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8180			ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8181			rgnp = srdp->srd_hmergnp[rid];
8182			SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
8183			HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
8184			sfmmu_copytte(&sfhmep->hme_tte, ttep);
8185			rsaddr = rgnp->rgn_saddr;
8186			readdr = rsaddr + rgnp->rgn_size;
8187#ifdef DEBUG
8188			if (TTE_IS_VALID(ttep) ||
8189			    get_hblk_ttesz(hmeblkp) > TTE8K) {
8190				caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
8191				ASSERT(eva > sv_vaddr);
8192				ASSERT(sv_vaddr >= rsaddr);
8193				ASSERT(sv_vaddr < readdr);
8194				ASSERT(eva <= readdr);
8195			}
8196#endif /* DEBUG */
8197			/*
8198			 * Continue the search if we
8199			 * found an invalid 8K tte outside of the area
8200			 * covered by this hmeblk's region.
8201			 */
8202			if (TTE_IS_VALID(ttep)) {
8203				SFMMU_HASH_UNLOCK(hmebp);
8204				pfn = TTE_TO_PFN(sv_vaddr, ttep);
8205				return (pfn);
8206			} else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8207			    (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
8208				SFMMU_HASH_UNLOCK(hmebp);
8209				pfn = PFN_INVALID;
8210				return (pfn);
8211			}
8212		}
8213		SFMMU_HASH_UNLOCK(hmebp);
8214		hashno++;
8215	} while (hashno <= mmu_hashcnt);
8216	return (PFN_INVALID);
8217}
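
/*
 * The ISM redirection at the top of sfmmu_uvatopfn() is a base-and-
 * bounds lookup: when vaddr falls inside one of the process's ISM
 * segments, the hash search continues in the ISM hat using the offset
 * into the segment, conceptually
 *
 *	if (vaddr >= ism_start(map) && vaddr < ism_end(map)) {
 *		sfmmup = map.imap_ismhat;
 *		vaddr -= ism_start(map);
 *	}
 *
 * so every sharer of the segment resolves to the same hmeblks no
 * matter where each process attached it.
 */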
8218
8219
8220/*
 * For compatibility with AT&T and later optimizations
8222 */
8223/* ARGSUSED */
8224void
8225hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8226{
8227	ASSERT(hat != NULL);
8228	ASSERT(hat->sfmmu_xhat_provider == NULL);
8229}
8230
8231/*
8232 * Return the number of mappings to a particular page.  This number is an
8233 * approximation of the number of people sharing the page.
8234 *
 * Shared hmeblks or ism hmeblks are counted as 1 mapping here.
 * hat_page_checkshare() can be used instead to compare a threshold
 * against a share count that reflects the number of region sharers,
 * albeit at higher cost.
8238 */
8239ulong_t
8240hat_page_getshare(page_t *pp)
8241{
8242	page_t *spp = pp;	/* start page */
8243	kmutex_t *pml;
8244	ulong_t	cnt;
8245	int index, sz = TTE64K;
8246
8247	/*
8248	 * We need to grab the mlist lock to make sure any outstanding
8249	 * load/unloads complete.  Otherwise we could return zero
	 * even though the unload(s) haven't finished yet.
8251	 */
8252	pml = sfmmu_mlist_enter(spp);
8253	cnt = spp->p_share;
8254
8255#ifdef VAC
8256	if (kpm_enable)
8257		cnt += spp->p_kpmref;
8258#endif
8259	if (vpm_enable && pp->p_vpmref) {
8260		cnt += 1;
8261	}
8262
8263	/*
8264	 * If we have any large mappings, we count the number of
8265	 * mappings that this large page is part of.
8266	 */
8267	index = PP_MAPINDEX(spp);
8268	index >>= 1;
8269	while (index) {
8270		pp = PP_GROUPLEADER(spp, sz);
8271		if ((index & 0x1) && pp != spp) {
8272			cnt += pp->p_share;
8273			spp = pp;
8274		}
8275		index >>= 1;
8276		sz++;
8277	}
8278	sfmmu_mlist_exit(pml);
8279	return (cnt);
8280}
8281
8282/*
8283 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
8284 * otherwise. Count shared hmeblks by region's refcnt.
8285 */
8286int
8287hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
8288{
8289	kmutex_t *pml;
8290	ulong_t	cnt = 0;
8291	int index, sz = TTE8K;
8292	struct sf_hment *sfhme, *tmphme = NULL;
8293	struct hme_blk *hmeblkp;
8294
8295	pml = sfmmu_mlist_enter(pp);
8296
8297#ifdef VAC
8298	if (kpm_enable)
8299		cnt = pp->p_kpmref;
8300#endif
8301
8302	if (vpm_enable && pp->p_vpmref) {
8303		cnt += 1;
8304	}
8305
8306	if (pp->p_share + cnt > sh_thresh) {
8307		sfmmu_mlist_exit(pml);
8308		return (1);
8309	}
8310
8311	index = PP_MAPINDEX(pp);
8312
8313again:
8314	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8315		tmphme = sfhme->hme_next;
8316		if (IS_PAHME(sfhme)) {
8317			continue;
8318		}
8319
8320		hmeblkp = sfmmu_hmetohblk(sfhme);
8321		if (hmeblkp->hblk_xhat_bit) {
8322			cnt++;
8323			if (cnt > sh_thresh) {
8324				sfmmu_mlist_exit(pml);
8325				return (1);
8326			}
8327			continue;
8328		}
8329		if (hme_size(sfhme) != sz) {
8330			continue;
8331		}
8332
8333		if (hmeblkp->hblk_shared) {
8334			sf_srd_t *srdp = hblktosrd(hmeblkp);
8335			uint_t rid = hmeblkp->hblk_tag.htag_rid;
8336			sf_region_t *rgnp;
8337			ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8338			ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8339			ASSERT(srdp != NULL);
8340			rgnp = srdp->srd_hmergnp[rid];
8341			SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8342			    rgnp, rid);
8343			cnt += rgnp->rgn_refcnt;
8344		} else {
8345			cnt++;
8346		}
8347		if (cnt > sh_thresh) {
8348			sfmmu_mlist_exit(pml);
8349			return (1);
8350		}
8351	}
8352
8353	index >>= 1;
8354	sz++;
8355	while (index) {
8356		pp = PP_GROUPLEADER(pp, sz);
8357		ASSERT(sfmmu_mlist_held(pp));
8358		if (index & 0x1) {
8359			goto again;
8360		}
8361		index >>= 1;
8362		sz++;
8363	}
8364	sfmmu_mlist_exit(pml);
8365	return (0);
8366}
8367
8368/*
8369 * Unload all large mappings to the pp and reset the p_szc field of every
8370 * constituent page according to the remaining mappings.
8371 *
8372 * pp must be locked SE_EXCL. Even though no other constituent pages are
8373 * locked it's legal to unload the large mappings to the pp because all
8374 * constituent pages of large locked mappings have to be locked SE_SHARED.
8375 * This means if we have SE_EXCL lock on one of constituent pages none of the
8376 * large mappings to pp are locked.
8377 *
 * Decrease the p_szc field starting from the last constituent page and ending
 * with the root page. This method is used because other threads rely on the
 * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This
 * ensures that p_szc changes of the constituent pages appear atomic for all
 * threads that use sfmmu_mlspl_enter() to examine the p_szc field.
8384 *
8385 * This mechanism is only used for file system pages where it's not always
8386 * possible to get SE_EXCL locks on all constituent pages to demote the size
8387 * code (as is done for anonymous or kernel large pages).
8388 *
8389 * See more comments in front of sfmmu_mlspl_enter().
8390 */
8391void
8392hat_page_demote(page_t *pp)
8393{
8394	int index;
8395	int sz;
8396	cpuset_t cpuset;
8397	int sync = 0;
8398	page_t *rootpp;
8399	struct sf_hment *sfhme;
8400	struct sf_hment *tmphme = NULL;
8401	struct hme_blk *hmeblkp;
8402	uint_t pszc;
8403	page_t *lastpp;
8404	cpuset_t tset;
8405	pgcnt_t npgs;
8406	kmutex_t *pml;
8407	kmutex_t *pmtx = NULL;
8408
8409	ASSERT(PAGE_EXCL(pp));
8410	ASSERT(!PP_ISFREE(pp));
8411	ASSERT(!PP_ISKAS(pp));
8412	ASSERT(page_szc_lock_assert(pp));
8413	pml = sfmmu_mlist_enter(pp);
8414
8415	pszc = pp->p_szc;
8416	if (pszc == 0) {
8417		goto out;
8418	}
8419
8420	index = PP_MAPINDEX(pp) >> 1;
8421
8422	if (index) {
8423		CPUSET_ZERO(cpuset);
8424		sz = TTE64K;
8425		sync = 1;
8426	}
8427
8428	while (index) {
8429		if (!(index & 0x1)) {
8430			index >>= 1;
8431			sz++;
8432			continue;
8433		}
8434		ASSERT(sz <= pszc);
8435		rootpp = PP_GROUPLEADER(pp, sz);
8436		for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8437			tmphme = sfhme->hme_next;
8438			ASSERT(!IS_PAHME(sfhme));
8439			hmeblkp = sfmmu_hmetohblk(sfhme);
8440			if (hme_size(sfhme) != sz) {
8441				continue;
8442			}
8443			if (hmeblkp->hblk_xhat_bit) {
8444				cmn_err(CE_PANIC,
8445				    "hat_page_demote: xhat hmeblk");
8446			}
8447			tset = sfmmu_pageunload(rootpp, sfhme, sz);
8448			CPUSET_OR(cpuset, tset);
8449		}
8450		if (index >>= 1) {
8451			sz++;
8452		}
8453	}
8454
8455	ASSERT(!PP_ISMAPPED_LARGE(pp));
8456
8457	if (sync) {
8458		xt_sync(cpuset);
8459#ifdef VAC
8460		if (PP_ISTNC(pp)) {
8461			conv_tnc(rootpp, sz);
8462		}
8463#endif	/* VAC */
8464	}
8465
8466	pmtx = sfmmu_page_enter(pp);
8467
8468	ASSERT(pp->p_szc == pszc);
8469	rootpp = PP_PAGEROOT(pp);
8470	ASSERT(rootpp->p_szc == pszc);
8471	lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8472
8473	while (lastpp != rootpp) {
8474		sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8475		ASSERT(sz < pszc);
8476		npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8477		ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8478		while (--npgs > 0) {
8479			lastpp->p_szc = (uchar_t)sz;
8480			lastpp = PP_PAGEPREV(lastpp);
8481		}
8482		if (sz) {
			/*
			 * Make sure all updates to the constituent pages'
			 * p_szc fields are globally visible before the
			 * current root's p_szc is updated.
			 */
8488			membar_producer();
8489		}
8490		lastpp->p_szc = sz;
8491		ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8492		if (lastpp != rootpp) {
8493			lastpp = PP_PAGEPREV(lastpp);
8494		}
8495	}
8496	if (sz == 0) {
8497		/* the loop above doesn't cover this case */
8498		rootpp->p_szc = 0;
8499	}
8500out:
8501	ASSERT(pp->p_szc == 0);
8502	if (pmtx != NULL) {
8503		sfmmu_page_exit(pmtx);
8504	}
8505	sfmmu_mlist_exit(pml);
8506}
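
/*
 * The update order in hat_page_demote() is deliberate: constituent
 * p_szc fields are rewritten from the last page back toward the root,
 * with a membar_producer() before each group leader is updated, so a
 * thread in sfmmu_mlspl_enter() that observes a leader's new (smaller)
 * p_szc is guaranteed to see the new p_szc of the trailing
 * constituents as well.  In outline:
 *
 *	lastpp->p_szc = sz;		trailing constituents first
 *	...
 *	membar_producer();		make those stores visible
 *	leader->p_szc = sz;		then publish via the leader
 */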
8507
8508/*
8509 * Refresh the HAT ismttecnt[] element for size szc.
8510 * Caller must have set ISM busy flag to prevent mapping
8511 * lists from changing while we're traversing them.
8512 */
8513pgcnt_t
8514ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8515{
8516	ism_blk_t	*ism_blkp = sfmmup->sfmmu_iblk;
8517	ism_map_t	*ism_map;
8518	pgcnt_t		npgs = 0;
8519	pgcnt_t		npgs_scd = 0;
8520	int		j;
8521	sf_scd_t	*scdp;
8522	uchar_t		rid;
8523
8524	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8525	scdp = sfmmup->sfmmu_scdp;
8526
8527	for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8528		ism_map = ism_blkp->iblk_maps;
		for (j = 0; j < ISM_MAP_SLOTS && ism_map[j].imap_ismhat; j++) {
8530			rid = ism_map[j].imap_rid;
8531			ASSERT(rid == SFMMU_INVALID_ISMRID ||
8532			    rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8533
8534			if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8535			    SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8536				/* ISM is in sfmmup's SCD */
8537				npgs_scd +=
8538				    ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8539			} else {
				/* ISM is not in SCD */
8541				npgs +=
8542				    ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8543			}
8544		}
8545	}
8546	sfmmup->sfmmu_ismttecnt[szc] = npgs;
8547	sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8548	return (npgs);
8549}
8550
8551/*
8552 * Yield the memory claim requirement for an address space.
8553 *
8554 * This is currently implemented as the number of bytes that have active
8555 * hardware translations that have page structures.  Therefore, it can
8556 * underestimate the traditional resident set size, eg, if the
8557 * physical page is present and the hardware translation is missing;
8558 * and it can overestimate the rss, eg, if there are active
8559 * translations to a frame buffer with page structs.
8560 * Also, it does not take sharing into account.
8561 *
8562 * Note that we don't acquire locks here since this function is most often
8563 * called from the clock thread.
8564 */
8565size_t
8566hat_get_mapped_size(struct hat *hat)
8567{
8568	size_t		assize = 0;
8569	int 		i;
8570
8571	if (hat == NULL)
8572		return (0);
8573
8574	ASSERT(hat->sfmmu_xhat_provider == NULL);
8575
8576	for (i = 0; i < mmu_page_sizes; i++)
8577		assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8578		    (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8579
8580	if (hat->sfmmu_iblk == NULL)
8581		return (assize);
8582
8583	for (i = 0; i < mmu_page_sizes; i++)
8584		assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8585		    (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8586
8587	return (assize);
8588}
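
/*
 * In other words, the claim reported above is
 *
 *	assize = SUM over i of (ttecnt[i] + scdrttecnt[i] +
 *	    ismttecnt[i] + scdismttecnt[i]) * TTEBYTES(i)
 *
 * For example, a hat with 100 8K translations and two 4M translations
 * (and no ISM) yields 100 * 8192 + 2 * 4194304 = 9207808 bytes.
 */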
8589
8590int
8591hat_stats_enable(struct hat *hat)
8592{
8593	hatlock_t	*hatlockp;
8594
8595	ASSERT(hat->sfmmu_xhat_provider == NULL);
8596
8597	hatlockp = sfmmu_hat_enter(hat);
8598	hat->sfmmu_rmstat++;
8599	sfmmu_hat_exit(hatlockp);
8600	return (1);
8601}
8602
8603void
8604hat_stats_disable(struct hat *hat)
8605{
8606	hatlock_t	*hatlockp;
8607
8608	ASSERT(hat->sfmmu_xhat_provider == NULL);
8609
8610	hatlockp = sfmmu_hat_enter(hat);
8611	hat->sfmmu_rmstat--;
8612	sfmmu_hat_exit(hatlockp);
8613}
8614
8615/*
 * Routines for entering or removing ourselves from the
 * ism_hat's mapping list. These are used for both private and
 * SCD hats.
8619 */
8620static void
8621iment_add(struct ism_ment *iment,  struct hat *ism_hat)
8622{
8623	ASSERT(MUTEX_HELD(&ism_mlist_lock));
8624
8625	iment->iment_prev = NULL;
8626	iment->iment_next = ism_hat->sfmmu_iment;
8627	if (ism_hat->sfmmu_iment) {
8628		ism_hat->sfmmu_iment->iment_prev = iment;
8629	}
8630	ism_hat->sfmmu_iment = iment;
8631}
8632
8633static void
8634iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8635{
8636	ASSERT(MUTEX_HELD(&ism_mlist_lock));
8637
8638	if (ism_hat->sfmmu_iment == NULL) {
8639		panic("ism map entry remove - no entries");
8640	}
8641
8642	if (iment->iment_prev) {
8643		ASSERT(ism_hat->sfmmu_iment != iment);
8644		iment->iment_prev->iment_next = iment->iment_next;
8645	} else {
8646		ASSERT(ism_hat->sfmmu_iment == iment);
8647		ism_hat->sfmmu_iment = iment->iment_next;
8648	}
8649
8650	if (iment->iment_next) {
8651		iment->iment_next->iment_prev = iment->iment_prev;
8652	}
8653
8654	/*
8655	 * zero out the entry
8656	 */
8657	iment->iment_next = NULL;
8658	iment->iment_prev = NULL;
8659	iment->iment_hat =  NULL;
8660	iment->iment_base_va = 0;
8661}
8662
8663/*
 * Hat_share()/unshare() return a (non-zero) error
8665 * when saddr and daddr are not properly aligned.
8666 *
8667 * The top level mapping element determines the alignment
8668 * requirement for saddr and daddr, depending on different
8669 * architectures.
8670 *
8671 * When hat_share()/unshare() are not supported,
8672 * HATOP_SHARE()/UNSHARE() return 0
8673 */
8674int
8675hat_share(struct hat *sfmmup, caddr_t addr,
8676	struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8677{
8678	ism_blk_t	*ism_blkp;
8679	ism_blk_t	*new_iblk;
8680	ism_map_t 	*ism_map;
8681	ism_ment_t	*ism_ment;
8682	int		i, added;
8683	hatlock_t	*hatlockp;
8684	int		reload_mmu = 0;
8685	uint_t		ismshift = page_get_shift(ismszc);
8686	size_t		ismpgsz = page_get_pagesize(ismszc);
8687	uint_t		ismmask = (uint_t)ismpgsz - 1;
8688	size_t		sh_size = ISM_SHIFT(ismshift, len);
8689	ushort_t	ismhatflag;
8690	hat_region_cookie_t rcookie;
8691	sf_scd_t	*old_scdp;
8692
8693#ifdef DEBUG
8694	caddr_t		eaddr = addr + len;
8695#endif /* DEBUG */
8696
8697	ASSERT(ism_hatid != NULL && sfmmup != NULL);
8698	ASSERT(sptaddr == ISMID_STARTADDR);
8699	/*
8700	 * Check the alignment.
8701	 */
8702	if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8703		return (EINVAL);
8704
8705	/*
8706	 * Check size alignment.
8707	 */
8708	if (!ISM_ALIGNED(ismshift, len))
8709		return (EINVAL);
8710
8711	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
8712
8713	/*
8714	 * Allocate ism_ment for the ism_hat's mapping list, and an
8715	 * ism map blk in case we need one.  We must do our
8716	 * allocations before acquiring locks to prevent a deadlock
8717	 * in the kmem allocator on the mapping list lock.
8718	 */
8719	new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8720	ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8721
8722	/*
8723	 * Serialize ISM mappings with the ISM busy flag, and also the
8724	 * trap handlers.
8725	 */
8726	sfmmu_ismhat_enter(sfmmup, 0);
8727
8728	/*
8729	 * Allocate an ism map blk if necessary.
8730	 */
8731	if (sfmmup->sfmmu_iblk == NULL) {
8732		sfmmup->sfmmu_iblk = new_iblk;
8733		bzero(new_iblk, sizeof (*new_iblk));
8734		new_iblk->iblk_nextpa = (uint64_t)-1;
8735		membar_stst();	/* make sure next ptr visible to all CPUs */
8736		sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8737		reload_mmu = 1;
8738		new_iblk = NULL;
8739	}
8740
8741#ifdef DEBUG
8742	/*
8743	 * Make sure mapping does not already exist.
8744	 */
8745	ism_blkp = sfmmup->sfmmu_iblk;
8746	while (ism_blkp != NULL) {
8747		ism_map = ism_blkp->iblk_maps;
8748		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
			if ((addr >= ism_start(ism_map[i]) &&
			    addr < ism_end(ism_map[i])) ||
			    (eaddr > ism_start(ism_map[i]) &&
			    eaddr <= ism_end(ism_map[i]))) {
8753				panic("sfmmu_share: Already mapped!");
8754			}
8755		}
8756		ism_blkp = ism_blkp->iblk_next;
8757	}
8758#endif /* DEBUG */
8759
8760	ASSERT(ismszc >= TTE4M);
8761	if (ismszc == TTE4M) {
8762		ismhatflag = HAT_4M_FLAG;
8763	} else if (ismszc == TTE32M) {
8764		ismhatflag = HAT_32M_FLAG;
8765	} else if (ismszc == TTE256M) {
8766		ismhatflag = HAT_256M_FLAG;
8767	}
8768	/*
8769	 * Add mapping to first available mapping slot.
8770	 */
8771	ism_blkp = sfmmup->sfmmu_iblk;
8772	added = 0;
8773	while (!added) {
8774		ism_map = ism_blkp->iblk_maps;
8775		for (i = 0; i < ISM_MAP_SLOTS; i++)  {
8776			if (ism_map[i].imap_ismhat == NULL) {
8777
8778				ism_map[i].imap_ismhat = ism_hatid;
8779				ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8780				ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8781				ism_map[i].imap_hatflags = ismhatflag;
8782				ism_map[i].imap_sz_mask = ismmask;
				/*
				 * imap_seg is checked in ISM_CHECK to see if
				 * it is non-NULL; if so, the other info is
				 * assumed valid.
				 */
8787				membar_stst();
8788				ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8789				ism_map[i].imap_ment = ism_ment;
8790
8791				/*
8792				 * Now add ourselves to the ism_hat's
8793				 * mapping list.
8794				 */
8795				ism_ment->iment_hat = sfmmup;
8796				ism_ment->iment_base_va = addr;
8797				ism_hatid->sfmmu_ismhat = 1;
8798				mutex_enter(&ism_mlist_lock);
8799				iment_add(ism_ment, ism_hatid);
8800				mutex_exit(&ism_mlist_lock);
8801				added = 1;
8802				break;
8803			}
8804		}
8805		if (!added && ism_blkp->iblk_next == NULL) {
8806			ism_blkp->iblk_next = new_iblk;
8807			new_iblk = NULL;
8808			bzero(ism_blkp->iblk_next,
8809			    sizeof (*ism_blkp->iblk_next));
8810			ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8811			membar_stst();
8812			ism_blkp->iblk_nextpa =
8813			    va_to_pa((caddr_t)ism_blkp->iblk_next);
8814		}
8815		ism_blkp = ism_blkp->iblk_next;
8816	}
8817
8818	/*
8819	 * After calling hat_join_region, sfmmup may join a new SCD or
8820	 * move from the old scd to a new scd, in which case, we want to
8821	 * shrink the sfmmup's private tsb size, i.e., pass shrink to
8822	 * sfmmu_check_page_sizes at the end of this routine.
8823	 */
8824	old_scdp = sfmmup->sfmmu_scdp;
8825
8826	rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8827	    PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8828	if (rcookie != HAT_INVALID_REGION_COOKIE) {
8829		ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8830	}
8831	/*
8832	 * Update our counters for this sfmmup's ism mappings.
8833	 */
8834	for (i = 0; i <= ismszc; i++) {
8835		if (!(disable_ism_large_pages & (1 << i)))
8836			(void) ism_tsb_entries(sfmmup, i);
8837	}
8838
8839	/*
	 * For ISM and DISM we do not support 512K pages, so we only
8841	 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8842	 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8843	 *
8844	 * Need to set 32M/256M ISM flags to make sure
8845	 * sfmmu_check_page_sizes() enables them on Panther.
8846	 */
8847	ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8848
8849	switch (ismszc) {
8850	case TTE256M:
8851		if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8852			hatlockp = sfmmu_hat_enter(sfmmup);
8853			SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8854			sfmmu_hat_exit(hatlockp);
8855		}
8856		break;
8857	case TTE32M:
8858		if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8859			hatlockp = sfmmu_hat_enter(sfmmup);
8860			SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8861			sfmmu_hat_exit(hatlockp);
8862		}
8863		break;
8864	default:
8865		break;
8866	}
8867
8868	/*
8869	 * If we updated the ismblkpa for this HAT we must make
8870	 * sure all CPUs running this process reload their tsbmiss area.
8871	 * Otherwise they will fail to load the mappings in the tsbmiss
8872	 * handler and will loop calling pagefault().
8873	 */
8874	if (reload_mmu) {
8875		hatlockp = sfmmu_hat_enter(sfmmup);
8876		sfmmu_sync_mmustate(sfmmup);
8877		sfmmu_hat_exit(hatlockp);
8878	}
8879
8880	sfmmu_ismhat_exit(sfmmup, 0);
8881
8882	/*
8883	 * Free up ismblk if we didn't use it.
8884	 */
8885	if (new_iblk != NULL)
8886		kmem_cache_free(ism_blk_cache, new_iblk);
8887
8888	/*
8889	 * Check TSB and TLB page sizes.
8890	 */
8891	if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8892		sfmmu_check_page_sizes(sfmmup, 0);
8893	} else {
8894		sfmmu_check_page_sizes(sfmmup, 1);
8895	}
8896	return (0);
8897}
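
/*
 * A hypothetical usage sketch (the real caller is the System V shared
 * memory code, which may differ in detail): attaching an ISM segment
 * of size len at addr would look like
 *
 *	if (hat_share(as->a_hat, addr, ism_hatid, ISMID_STARTADDR,
 *	    len, TTE4M) != 0)
 *		return (EINVAL);
 *
 * where a non-zero return means addr or len was not ISM aligned;
 * hat_unshare(as->a_hat, addr, len, TTE4M) below is the matching
 * teardown.
 */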
8898
8899/*
8900 * hat_unshare removes exactly one ism_map from
8901 * this process's as.  It expects multiple calls
8902 * to hat_unshare for multiple shm segments.
8903 */
8904void
8905hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8906{
8907	ism_map_t 	*ism_map;
8908	ism_ment_t	*free_ment = NULL;
8909	ism_blk_t	*ism_blkp;
8910	struct hat	*ism_hatid;
8911	int 		found, i;
8912	hatlock_t	*hatlockp;
8913	struct tsb_info	*tsbinfo;
8914	uint_t		ismshift = page_get_shift(ismszc);
8915	size_t		sh_size = ISM_SHIFT(ismshift, len);
8916	uchar_t		ism_rid;
8917	sf_scd_t	*old_scdp;
8918
8919	ASSERT(ISM_ALIGNED(ismshift, addr));
8920	ASSERT(ISM_ALIGNED(ismshift, len));
8921	ASSERT(sfmmup != NULL);
8922	ASSERT(sfmmup != ksfmmup);
8923
8924	if (sfmmup->sfmmu_xhat_provider) {
8925		XHAT_UNSHARE(sfmmup, addr, len);
8926		return;
8927	} else {
8928		/*
8929		 * This must be a CPU HAT. If the address space has
8930		 * XHATs attached, inform all XHATs that ISM segment
8931		 * is going away
8932		 */
8933		ASSERT(sfmmup->sfmmu_as != NULL);
8934		if (sfmmup->sfmmu_as->a_xhat != NULL)
8935			xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
8936	}
8937
8938	/*
8939	 * Make sure that during the entire time ISM mappings are removed,
8940	 * the trap handlers serialize behind us, and that no one else
8941	 * can be mucking with ISM mappings.  This also lets us get away
8942	 * with not doing expensive cross calls to flush the TLB -- we
8943	 * just discard the context, flush the entire TSB, and call it
8944	 * a day.
8945	 */
8946	sfmmu_ismhat_enter(sfmmup, 0);
8947
8948	/*
8949	 * Remove the mapping.
8950	 *
	 * We can't have any holes in the ism map.
	 * The tsb miss code, while searching the ism map, will
	 * stop on an empty map slot.  So if removal leaves a hole,
	 * we must move every entry past it up by one.
8955	 *
8956	 * Also empty ism map blks are not freed until the
8957	 * process exits. This is to prevent a MT race condition
8958	 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8959	 */
8960	found = 0;
8961	ism_blkp = sfmmup->sfmmu_iblk;
8962	while (!found && ism_blkp != NULL) {
8963		ism_map = ism_blkp->iblk_maps;
8964		for (i = 0; i < ISM_MAP_SLOTS; i++) {
8965			if (addr == ism_start(ism_map[i]) &&
8966			    sh_size == (size_t)(ism_size(ism_map[i]))) {
8967				found = 1;
8968				break;
8969			}
8970		}
8971		if (!found)
8972			ism_blkp = ism_blkp->iblk_next;
8973	}
8974
8975	if (found) {
8976		ism_hatid = ism_map[i].imap_ismhat;
8977		ism_rid = ism_map[i].imap_rid;
8978		ASSERT(ism_hatid != NULL);
8979		ASSERT(ism_hatid->sfmmu_ismhat == 1);
8980
8981		/*
8982		 * After hat_leave_region, the sfmmup may leave SCD,
8983		 * in which case, we want to grow the private tsb size when
8984		 * calling sfmmu_check_page_sizes at the end of the routine.
8985		 */
8986		old_scdp = sfmmup->sfmmu_scdp;
8987		/*
8988		 * Then remove ourselves from the region.
8989		 */
8990		if (ism_rid != SFMMU_INVALID_ISMRID) {
8991			hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8992			    HAT_REGION_ISM);
8993		}
8994
8995		/*
8996		 * And now guarantee that any other cpu
8997		 * that tries to process an ISM miss
8998		 * will go to tl=0.
8999		 */
9000		hatlockp = sfmmu_hat_enter(sfmmup);
9001		sfmmu_invalidate_ctx(sfmmup);
9002		sfmmu_hat_exit(hatlockp);
9003
9004		/*
9005		 * Remove ourselves from the ism mapping list.
9006		 */
9007		mutex_enter(&ism_mlist_lock);
9008		iment_sub(ism_map[i].imap_ment, ism_hatid);
9009		mutex_exit(&ism_mlist_lock);
9010		free_ment = ism_map[i].imap_ment;
9011
9012		/*
9013		 * We delete the ism map by copying
9014		 * the next map over the current one.
9015		 * We will take the next one in the maps
9016		 * array or from the next ism_blk.
9017		 */
9018		while (ism_blkp != NULL) {
9019			ism_map = ism_blkp->iblk_maps;
9020			while (i < (ISM_MAP_SLOTS - 1)) {
9021				ism_map[i] = ism_map[i + 1];
9022				i++;
9023			}
9024			/* i == (ISM_MAP_SLOTS - 1) */
9025			ism_blkp = ism_blkp->iblk_next;
9026			if (ism_blkp != NULL) {
9027				ism_map[i] = ism_blkp->iblk_maps[0];
9028				i = 0;
9029			} else {
9030				ism_map[i].imap_seg = 0;
9031				ism_map[i].imap_vb_shift = 0;
9032				ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
9033				ism_map[i].imap_hatflags = 0;
9034				ism_map[i].imap_sz_mask = 0;
9035				ism_map[i].imap_ismhat = NULL;
9036				ism_map[i].imap_ment = NULL;
9037			}
9038		}
9039
9040		/*
9041		 * Now flush entire TSB for the process, since
9042		 * demapping page by page can be too expensive.
9043		 * We don't have to flush the TLB here anymore
9044		 * since we switch to a new TLB ctx instead.
9045		 * Also, there is no need to flush if the process
9046		 * is exiting since the TSB will be freed later.
9047		 */
9048		if (!sfmmup->sfmmu_free) {
9049			hatlockp = sfmmu_hat_enter(sfmmup);
9050			for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
9051			    tsbinfo = tsbinfo->tsb_next) {
9052				if (tsbinfo->tsb_flags & TSB_SWAPPED)
9053					continue;
9054				if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
9055					tsbinfo->tsb_flags |=
9056					    TSB_FLUSH_NEEDED;
9057					continue;
9058				}
9059
9060				sfmmu_inv_tsb(tsbinfo->tsb_va,
9061				    TSB_BYTES(tsbinfo->tsb_szc));
9062			}
9063			sfmmu_hat_exit(hatlockp);
9064		}
9065	}
9066
9067	/*
9068	 * Update our counters for this sfmmup's ism mappings.
9069	 */
9070	for (i = 0; i <= ismszc; i++) {
9071		if (!(disable_ism_large_pages & (1 << i)))
9072			(void) ism_tsb_entries(sfmmup, i);
9073	}
9074
9075	sfmmu_ismhat_exit(sfmmup, 0);
9076
9077	/*
9078	 * We must do our freeing here after dropping locks
9079	 * to prevent a deadlock in the kmem allocator on the
9080	 * mapping list lock.
9081	 */
9082	if (free_ment != NULL)
9083		kmem_cache_free(ism_ment_cache, free_ment);
9084
9085	/*
9086	 * Check TSB and TLB page sizes if the process isn't exiting.
9087	 */
9088	if (!sfmmup->sfmmu_free) {
9089		if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
9090			sfmmu_check_page_sizes(sfmmup, 1);
9091		} else {
9092			sfmmu_check_page_sizes(sfmmup, 0);
9093		}
9094	}
9095}
9096
9097/* ARGSUSED */
9098static int
9099sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
9100{
9101	/* void *buf is sfmmu_t pointer */
9102	bzero(buf, sizeof (sfmmu_t));
9103
9104	return (0);
9105}
9106
9107/* ARGSUSED */
9108static void
9109sfmmu_idcache_destructor(void *buf, void *cdrarg)
9110{
9111	/* void *buf is sfmmu_t pointer */
9112}
9113
9114/*
9115 * setup kmem hmeblks by bzeroing all members and initializing the nextpa
9116 * field to be the pa of this hmeblk
9117 */
9118/* ARGSUSED */
9119static int
9120sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
9121{
9122	struct hme_blk *hmeblkp;
9123
9124	bzero(buf, (size_t)cdrarg);
9125	hmeblkp = (struct hme_blk *)buf;
9126	hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
9127
9128#ifdef	HBLK_TRACE
9129	mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
9130#endif	/* HBLK_TRACE */
9131
9132	return (0);
9133}
9134
9135/* ARGSUSED */
9136static void
9137sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
9138{
9139
9140#ifdef	HBLK_TRACE
9141
9142	struct hme_blk *hmeblkp;
9143
9144	hmeblkp = (struct hme_blk *)buf;
9145	mutex_destroy(&hmeblkp->hblk_audit_lock);
9146
9147#endif	/* HBLK_TRACE */
9148}
9149
9150#define	SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
9151static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
9152/*
 * The kmem allocator will call back into our reclaim routine when the system
 * is running low on memory.  We traverse the hash and free up all unused but
9155 * still cached hme_blks.  We also traverse the free list and free them up
9156 * as well.
9157 */
9158/*ARGSUSED*/
9159static void
9160sfmmu_hblkcache_reclaim(void *cdrarg)
9161{
9162	int i;
9163	struct hmehash_bucket *hmebp;
9164	struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
9165	static struct hmehash_bucket *uhmehash_reclaim_hand;
9166	static struct hmehash_bucket *khmehash_reclaim_hand;
9167	struct hme_blk *list = NULL, *last_hmeblkp;
9168	cpuset_t cpuset = cpu_ready_set;
9169	cpu_hme_pend_t *cpuhp;
9170
9171	/* Free up hmeblks on the cpu pending lists */
9172	for (i = 0; i < NCPU; i++) {
9173		cpuhp = &cpu_hme_pend[i];
9174		if (cpuhp->chp_listp != NULL)  {
9175			mutex_enter(&cpuhp->chp_mutex);
9176			if (cpuhp->chp_listp == NULL) {
9177				mutex_exit(&cpuhp->chp_mutex);
9178				continue;
9179			}
9180			for (last_hmeblkp = cpuhp->chp_listp;
9181			    last_hmeblkp->hblk_next != NULL;
9182			    last_hmeblkp = last_hmeblkp->hblk_next)
9183				;
9184			last_hmeblkp->hblk_next = list;
9185			list = cpuhp->chp_listp;
9186			cpuhp->chp_listp = NULL;
9187			cpuhp->chp_count = 0;
9188			mutex_exit(&cpuhp->chp_mutex);
9189		}
9190
9191	}
9192
9193	if (list != NULL) {
9194		kpreempt_disable();
9195		CPUSET_DEL(cpuset, CPU->cpu_id);
9196		xt_sync(cpuset);
9197		xt_sync(cpuset);
9198		kpreempt_enable();
9199		sfmmu_hblk_free(&list);
9200		list = NULL;
9201	}
9202
9203	hmebp = uhmehash_reclaim_hand;
9204	if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
9205		uhmehash_reclaim_hand = hmebp = uhme_hash;
9206	uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9207
9208	for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9209		if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9210			hmeblkp = hmebp->hmeblkp;
9211			pr_hblk = NULL;
9212			while (hmeblkp) {
9213				nx_hblk = hmeblkp->hblk_next;
9214				if (!hmeblkp->hblk_vcnt &&
9215				    !hmeblkp->hblk_hmecnt) {
9216					sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9217					    pr_hblk, &list, 0);
9218				} else {
9219					pr_hblk = hmeblkp;
9220				}
9221				hmeblkp = nx_hblk;
9222			}
9223			SFMMU_HASH_UNLOCK(hmebp);
9224		}
9225		if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
9226			hmebp = uhme_hash;
9227	}
9228
9229	hmebp = khmehash_reclaim_hand;
9230	if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
9231		khmehash_reclaim_hand = hmebp = khme_hash;
9232	khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9233
9234	for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9235		if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9236			hmeblkp = hmebp->hmeblkp;
9237			pr_hblk = NULL;
9238			while (hmeblkp) {
9239				nx_hblk = hmeblkp->hblk_next;
9240				if (!hmeblkp->hblk_vcnt &&
9241				    !hmeblkp->hblk_hmecnt) {
9242					sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9243					    pr_hblk, &list, 0);
9244				} else {
9245					pr_hblk = hmeblkp;
9246				}
9247				hmeblkp = nx_hblk;
9248			}
9249			SFMMU_HASH_UNLOCK(hmebp);
9250		}
9251		if (hmebp++ == &khme_hash[KHMEHASH_SZ])
9252			hmebp = khme_hash;
9253	}
9254	sfmmu_hblks_list_purge(&list, 0);
9255}
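
/*
 * The static "reclaim hands" above make the scan incremental: each
 * callback walks only UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio
 * user buckets (and likewise KHMEHASH_SZ / ... kernel buckets)
 * starting at the saved hand, then parks the hand past them.  With
 * the default ratio of 8, a full pass over each hash thus completes
 * after 8 invocations instead of stalling one callback on the whole
 * table.
 */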
9256
9257/*
 * sfmmu_get_ppvcolor() should become a vm_machdep or hatop interface.
 * The same goes for sfmmu_get_addrvcolor().
 *
 * This function will return the virtual color for the specified page. The
 * virtual color corresponds to this page's current mapping or its last
 * mapping. It is used by memory allocators to choose addresses with the
 * correct alignment so vac consistency is automatically maintained.  If the
 * page has no color it returns -1.
9266 */
9267/*ARGSUSED*/
9268int
9269sfmmu_get_ppvcolor(struct page *pp)
9270{
9271#ifdef VAC
9272	int color;
9273
9274	if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
9275		return (-1);
9276	}
9277	color = PP_GET_VCOLOR(pp);
9278	ASSERT(color < mmu_btop(shm_alignment));
9279	return (color);
9280#else
9281	return (-1);
9282#endif	/* VAC */
9283}
9284
9285/*
9286 * This function will return the desired alignment for vac consistency
9287 * (vac color) given a virtual address.  If no vac is present it returns -1.
9288 */
9289/*ARGSUSED*/
9290int
9291sfmmu_get_addrvcolor(caddr_t vaddr)
9292{
9293#ifdef VAC
9294	if (cache & CACHE_VAC) {
9295		return (addr_to_vcolor(vaddr));
9296	} else {
9297		return (-1);
9298	}
9299#else
9300	return (-1);
9301#endif	/* VAC */
9302}
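
/*
 * A note on the color arithmetic, assuming addr_to_vcolor() is the
 * usual "page index within the VAC alias range" computation (an
 * assumption, not spelled out in this file):
 *
 *	vcolor = ((uintptr_t)vaddr & (shm_alignment - 1)) >> MMU_PAGESHIFT;
 *
 * which is consistent with the ASSERT in sfmmu_get_ppvcolor() that a
 * page's color is always below mmu_btop(shm_alignment).  Two virtual
 * addresses can alias in the VAC only if they share a color, which is
 * why allocators use these hooks to pick shm_alignment-congruent
 * addresses.
 */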
9303
9304#ifdef VAC
9305/*
9306 * Check for conflicts.
 * A conflict exists if the new and existing mappings do not match in
 * their "shm_alignment" fields. If conflicts exist, the existing mappings
 * are flushed unless one of them is locked. If one of them is locked, then
 * the mappings are flushed and converted to non-cacheable mappings.
9311 */
9312static void
9313sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9314{
9315	struct hat *tmphat;
9316	struct sf_hment *sfhmep, *tmphme = NULL;
9317	struct hme_blk *hmeblkp;
9318	int vcolor;
9319	tte_t tte;
9320
9321	ASSERT(sfmmu_mlist_held(pp));
9322	ASSERT(!PP_ISNC(pp));		/* page better be cacheable */
9323
9324	vcolor = addr_to_vcolor(addr);
9325	if (PP_NEWPAGE(pp)) {
9326		PP_SET_VCOLOR(pp, vcolor);
9327		return;
9328	}
9329
9330	if (PP_GET_VCOLOR(pp) == vcolor) {
9331		return;
9332	}
9333
9334	if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9335		/*
9336		 * Previous user of page had a different color
9337		 * but since there are no current users
9338		 * we just flush the cache and change the color.
9339		 */
9340		SFMMU_STAT(sf_pgcolor_conflict);
9341		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9342		PP_SET_VCOLOR(pp, vcolor);
9343		return;
9344	}
9345
	/*
	 * If we get here we have a VAC conflict with a current
	 * mapping.  The VAC conflict policy is as follows:
	 * - The default is to unload the other mappings, except:
	 * - If we have a large mapping we uncache the page (and we
	 *   must uncache the rest of the large page too).
	 * - If any of the mappings are locked we uncache the page.
	 * - If the requested mapping is inconsistent with another
	 *   mapping in the same address space, we also uncache.  The
	 *   default would be to unload the inconsistent mapping, but
	 *   if it is in the same address space we run the risk of
	 *   unloading the pc or the stack that we will use as we
	 *   return to the user; we could then fault on the thing we
	 *   just unloaded and get into an infinite loop.
	 */
9364	if (PP_ISMAPPED_LARGE(pp)) {
9365		int sz;
9366
9367		/*
9368		 * Existing mapping is for big pages. We don't unload
9369		 * existing big mappings to satisfy new mappings.
9370		 * Always convert all mappings to TNC.
9371		 */
9372		sz = fnd_mapping_sz(pp);
9373		pp = PP_GROUPLEADER(pp, sz);
9374		SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9375		sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9376		    TTEPAGES(sz));
9377
9378		return;
9379	}
9380
	/*
	 * Check whether any mapping is in the same address space as the
	 * new one, or whether any mapping is locked, since in either
	 * case we need to uncache.
	 */
9385	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9386		tmphme = sfhmep->hme_next;
9387		if (IS_PAHME(sfhmep))
9388			continue;
9389		hmeblkp = sfmmu_hmetohblk(sfhmep);
9390		if (hmeblkp->hblk_xhat_bit)
9391			continue;
9392		tmphat = hblktosfmmu(hmeblkp);
9393		sfmmu_copytte(&sfhmep->hme_tte, &tte);
9394		ASSERT(TTE_IS_VALID(&tte));
9395		if (hmeblkp->hblk_shared || tmphat == hat ||
9396		    hmeblkp->hblk_lckcnt) {
9397			/*
9398			 * We have an uncache conflict
9399			 */
9400			SFMMU_STAT(sf_uncache_conflict);
9401			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9402			return;
9403		}
9404	}
9405
9406	/*
9407	 * We have an unload conflict
9408	 * We have already checked for LARGE mappings, therefore
9409	 * the remaining mapping(s) must be TTE8K.
9410	 */
9411	SFMMU_STAT(sf_unload_conflict);
9412
9413	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9414		tmphme = sfhmep->hme_next;
9415		if (IS_PAHME(sfhmep))
9416			continue;
9417		hmeblkp = sfmmu_hmetohblk(sfhmep);
9418		if (hmeblkp->hblk_xhat_bit)
9419			continue;
9420		ASSERT(!hmeblkp->hblk_shared);
9421		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9422	}
9423
9424	if (PP_ISMAPPED_KPM(pp))
9425		sfmmu_kpm_vac_unload(pp, addr);
9426
9427	/*
9428	 * Unloads only do TLB flushes so we need to flush the
9429	 * cache here.
9430	 */
9431	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9432	PP_SET_VCOLOR(pp, vcolor);
9433}
9434
9435/*
9436 * Whenever a mapping is unloaded and the page is in TNC state,
9437 * we see if the page can be made cacheable again. 'pp' is
9438 * the page that we just unloaded a mapping from, the size
9439 * of mapping that was unloaded is 'ottesz'.
9440 * Remark:
9441 * The recache policy for mpss pages can leave a performance problem
9442 * under the following circumstances:
9443 * . A large page in uncached mode has just been unmapped.
9444 * . All constituent pages are TNC due to a conflicting small mapping.
9445 * . There are many other, non conflicting, small mappings around for
9446 *   a lot of the constituent pages.
 * . We're called with the "old" groupleader page and the old ottesz,
 *   but this is irrelevant: the page is no longer PP_ISMAPPED_LARGE, so
 *   we end up with TTE8K or npages == 1.
 * . We call tst_tnc with the old groupleader only; if there is no
 *   conflict, we re-cache only this page.
 * . All other small mappings are not checked and will be left in TNC mode.
 * The problem is not very serious because:
 * . mpss is actually only defined for heap and stack, so the probability
 *   is not very high that a large page mapping exists in parallel to a
 *   small one (this is possible, but seems to be bad programming style
 *   in the application).
 * . The problem gets a little more serious when those TNC pages
 *   have to be mapped into kernel space, e.g. for networking.
 * . When VAC alias conflicts occur in applications, this is regarded
 *   as an application bug, so if kstats show them the application
 *   should be changed anyway.
9463 */
9464void
9465conv_tnc(page_t *pp, int ottesz)
9466{
9467	int cursz, dosz;
9468	pgcnt_t curnpgs, dopgs;
9469	pgcnt_t pg64k;
9470	page_t *pp2;
9471
9472	/*
9473	 * Determine how big a range we check for TNC and find
	 * leader page.  cursz is the size of the biggest
	 * mapping that still exists on 'pp'.
9476	 */
9477	if (PP_ISMAPPED_LARGE(pp)) {
9478		cursz = fnd_mapping_sz(pp);
9479	} else {
9480		cursz = TTE8K;
9481	}
9482
9483	if (ottesz >= cursz) {
9484		dosz = ottesz;
9485		pp2 = pp;
9486	} else {
9487		dosz = cursz;
9488		pp2 = PP_GROUPLEADER(pp, dosz);
9489	}
9490
9491	pg64k = TTEPAGES(TTE64K);
9492	dopgs = TTEPAGES(dosz);
9493
9494	ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9495
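	/*
	 * Walk the range in chunks.  Each chunk covers the largest
	 * mapping still present on its leader page, so tst_tnc() is
	 * asked about exactly one mapping's worth of constituent pages
	 * at a time.
	 */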
9496	while (dopgs != 0) {
9497		curnpgs = TTEPAGES(cursz);
9498		if (tst_tnc(pp2, curnpgs)) {
9499			SFMMU_STAT_ADD(sf_recache, curnpgs);
9500			sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9501			    curnpgs);
9502		}
9503
9504		ASSERT(dopgs >= curnpgs);
9505		dopgs -= curnpgs;
9506
9507		if (dopgs == 0) {
9508			break;
9509		}
9510
9511		pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9512		if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9513			cursz = fnd_mapping_sz(pp2);
9514		} else {
9515			cursz = TTE8K;
9516		}
9517	}
9518}
9519
9520/*
 * Returns 1 if the page(s) can be converted from TNC to a cacheable
 * setting, and 0 otherwise.
9524 */
9525int
9526tst_tnc(page_t *pp, pgcnt_t npages)
9527{
9528	struct	sf_hment *sfhme;
9529	struct	hme_blk *hmeblkp;
9530	tte_t	tte;
9531	caddr_t	vaddr;
9532	int	clr_valid = 0;
	int	color, color1, bcolor;
9534	int	i, ncolors;
9535
9536	ASSERT(pp != NULL);
9537	ASSERT(!(cache & CACHE_WRITEBACK));
9538
9539	if (npages > 1) {
9540		ncolors = CACHE_NUM_COLOR;
9541	}
9542
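	/*
	 * Every mapping of every constituent page (including any kpm
	 * mapping) must agree on a single virtual color; in addition,
	 * for a multi-page range each 8K page's color must equal its
	 * index within the large page.
	 */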
9543	for (i = 0; i < npages; i++) {
9544		ASSERT(sfmmu_mlist_held(pp));
9545		ASSERT(PP_ISTNC(pp));
9546		ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9547
9548		if (PP_ISPNC(pp)) {
9549			return (0);
9550		}
9551
9552		clr_valid = 0;
9553		if (PP_ISMAPPED_KPM(pp)) {
9554			caddr_t kpmvaddr;
9555
9556			ASSERT(kpm_enable);
9557			kpmvaddr = hat_kpm_page2va(pp, 1);
9558			ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9559			color1 = addr_to_vcolor(kpmvaddr);
9560			clr_valid = 1;
9561		}
9562
9563		for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9564			if (IS_PAHME(sfhme))
9565				continue;
9566			hmeblkp = sfmmu_hmetohblk(sfhme);
9567			if (hmeblkp->hblk_xhat_bit)
9568				continue;
9569
9570			sfmmu_copytte(&sfhme->hme_tte, &tte);
9571			ASSERT(TTE_IS_VALID(&tte));
9572
9573			vaddr = tte_to_vaddr(hmeblkp, tte);
9574			color = addr_to_vcolor(vaddr);
9575
9576			if (npages > 1) {
9577				/*
9578				 * If there is a big mapping, make sure
9579				 * 8K mapping is consistent with the big
9580				 * mapping.
9581				 */
9582				bcolor = i % ncolors;
9583				if (color != bcolor) {
9584					return (0);
9585				}
9586			}
9587			if (!clr_valid) {
9588				clr_valid = 1;
9589				color1 = color;
9590			}
9591
9592			if (color1 != color) {
9593				return (0);
9594			}
9595		}
9596
9597		pp = PP_PAGENEXT(pp);
9598	}
9599
9600	return (1);
9601}
9602
9603void
9604sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9605	pgcnt_t npages)
9606{
9607	kmutex_t *pmtx;
9608	int i, ncolors, bcolor;
9609	kpm_hlk_t *kpmp;
9610	cpuset_t cpuset;
9611
9612	ASSERT(pp != NULL);
9613	ASSERT(!(cache & CACHE_WRITEBACK));
9614
9615	kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9616	pmtx = sfmmu_page_enter(pp);
9617
9618	/*
9619	 * Fast path caching single unmapped page
9620	 */
9621	if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9622	    flags == HAT_CACHE) {
9623		PP_CLRTNC(pp);
9624		PP_CLRPNC(pp);
9625		sfmmu_page_exit(pmtx);
9626		sfmmu_kpm_kpmp_exit(kpmp);
9627		return;
9628	}
9629
	/*
	 * We need to capture all cpus in order to change cacheability
	 * because we can't allow one cpu to access the same physical
	 * page using a cacheable and a non-cacheable mapping at the same
	 * time.  Since we may end up walking the ism mapping list, we
	 * have to grab its lock now, since we can't do so after all the
	 * cpus have been captured.
	 */
9638	sfmmu_hat_lock_all();
9639	mutex_enter(&ism_mlist_lock);
9640	kpreempt_disable();
9641	cpuset = cpu_ready_set;
9642	xc_attention(cpuset);
9643
9644	if (npages > 1) {
		/*
		 * Make sure all colors are flushed since
		 * sfmmu_page_cache() only flushes one color -
		 * it does not know about big pages.
		 */
9650		ncolors = CACHE_NUM_COLOR;
9651		if (flags & HAT_TMPNC) {
9652			for (i = 0; i < ncolors; i++) {
9653				sfmmu_cache_flushcolor(i, pp->p_pagenum);
9654			}
9655			cache_flush_flag = CACHE_NO_FLUSH;
9656		}
9657	}
9658
9659	for (i = 0; i < npages; i++) {
9660
9661		ASSERT(sfmmu_mlist_held(pp));
9662
9663		if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9664
9665			if (npages > 1) {
9666				bcolor = i % ncolors;
9667			} else {
9668				bcolor = NO_VCOLOR;
9669			}
9670
9671			sfmmu_page_cache(pp, flags, cache_flush_flag,
9672			    bcolor);
9673		}
9674
9675		pp = PP_PAGENEXT(pp);
9676	}
9677
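	/*
	 * Wait for outstanding cross-calls, release the captured cpus,
	 * and drop the locks taken above.
	 */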
9678	xt_sync(cpuset);
9679	xc_dismissed(cpuset);
9680	mutex_exit(&ism_mlist_lock);
9681	sfmmu_hat_unlock_all();
9682	sfmmu_page_exit(pmtx);
9683	sfmmu_kpm_kpmp_exit(kpmp);
9684	kpreempt_enable();
9685}
9686
9687/*
 * This function changes the virtual cacheability of all mappings to a
 * particular page.  When changing from uncached to cacheable, the mappings
 * are only changed if all of them have the same virtual color.
 * We need to flush the cache on all cpus.  It is possible that
 * a process referenced a page as cacheable but has since exited
 * and cleared the mapping list.  We still need to flush the cache, but
 * have no state, so flushing on all cpus is the only alternative.
9695 */
9696static void
9697sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9698{
9699	struct	sf_hment *sfhme;
9700	struct	hme_blk *hmeblkp;
9701	sfmmu_t *sfmmup;
9702	tte_t	tte, ttemod;
9703	caddr_t	vaddr;
9704	int	ret, color;
9705	pfn_t	pfn;
9706
9707	color = bcolor;
9708	pfn = pp->p_pagenum;
9709
9710	for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9711
9712		if (IS_PAHME(sfhme))
9713			continue;
9714		hmeblkp = sfmmu_hmetohblk(sfhme);
9715
9716		if (hmeblkp->hblk_xhat_bit)
9717			continue;
9718
9719		sfmmu_copytte(&sfhme->hme_tte, &tte);
9720		ASSERT(TTE_IS_VALID(&tte));
9721		vaddr = tte_to_vaddr(hmeblkp, tte);
9722		color = addr_to_vcolor(vaddr);
9723
9724#ifdef DEBUG
9725		if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9726			ASSERT(color == bcolor);
9727		}
9728#endif
9729
9730		ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9731
9732		ttemod = tte;
9733		if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9734			TTE_CLR_VCACHEABLE(&ttemod);
9735		} else {	/* flags & HAT_CACHE */
9736			TTE_SET_VCACHEABLE(&ttemod);
9737		}
9738		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9739		if (ret < 0) {
9740			/*
9741			 * Since all cpus are captured modifytte should not
9742			 * fail.
9743			 */
9744			panic("sfmmu_page_cache: write to tte failed");
9745		}
9746
9747		sfmmup = hblktosfmmu(hmeblkp);
9748		if (cache_flush_flag == CACHE_FLUSH) {
9749			/*
9750			 * Flush TSBs, TLBs and caches
9751			 */
9752			if (hmeblkp->hblk_shared) {
9753				sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9754				uint_t rid = hmeblkp->hblk_tag.htag_rid;
9755				sf_region_t *rgnp;
9756				ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9757				ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9758				ASSERT(srdp != NULL);
9759				rgnp = srdp->srd_hmergnp[rid];
9760				SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9761				    srdp, rgnp, rid);
9762				(void) sfmmu_rgntlb_demap(vaddr, rgnp,
9763				    hmeblkp, 0);
9764				sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9765			} else if (sfmmup->sfmmu_ismhat) {
9766				if (flags & HAT_CACHE) {
9767					SFMMU_STAT(sf_ism_recache);
9768				} else {
9769					SFMMU_STAT(sf_ism_uncache);
9770				}
9771				sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9772				    pfn, CACHE_FLUSH);
9773			} else {
9774				sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9775				    pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9776			}
9777
9778			/*
9779			 * all cache entries belonging to this pfn are
9780			 * now flushed.
9781			 */
9782			cache_flush_flag = CACHE_NO_FLUSH;
9783		} else {
9784			/*
9785			 * Flush only TSBs and TLBs.
9786			 */
9787			if (hmeblkp->hblk_shared) {
9788				sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9789				uint_t rid = hmeblkp->hblk_tag.htag_rid;
9790				sf_region_t *rgnp;
9791				ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9792				ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9793				ASSERT(srdp != NULL);
9794				rgnp = srdp->srd_hmergnp[rid];
9795				SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9796				    srdp, rgnp, rid);
9797				(void) sfmmu_rgntlb_demap(vaddr, rgnp,
9798				    hmeblkp, 0);
9799			} else if (sfmmup->sfmmu_ismhat) {
9800				if (flags & HAT_CACHE) {
9801					SFMMU_STAT(sf_ism_recache);
9802				} else {
9803					SFMMU_STAT(sf_ism_uncache);
9804				}
9805				sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9806				    pfn, CACHE_NO_FLUSH);
9807			} else {
9808				sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9809			}
9810		}
9811	}
9812
9813	if (PP_ISMAPPED_KPM(pp))
9814		sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9815
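	/*
	 * Finally, update the page's software cacheability state to
	 * match the TTEs rewritten above.
	 */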
9816	switch (flags) {
9817
9818		default:
			panic("sfmmu_page_cache: unknown flags");
9820			break;
9821
9822		case HAT_CACHE:
9823			PP_CLRTNC(pp);
9824			PP_CLRPNC(pp);
9825			PP_SET_VCOLOR(pp, color);
9826			break;
9827
9828		case HAT_TMPNC:
9829			PP_SETTNC(pp);
9830			PP_SET_VCOLOR(pp, NO_VCOLOR);
9831			break;
9832
9833		case HAT_UNCACHE:
9834			PP_SETPNC(pp);
9835			PP_CLRTNC(pp);
9836			PP_SET_VCOLOR(pp, NO_VCOLOR);
9837			break;
9838	}
9839}
9840#endif	/* VAC */
9841
9842
/*
 * Wrapper routine used to return a context.
 *
 * It's the responsibility of the caller to guarantee that the
 * process serializes on calls here by taking the HAT lock for
 * the hat.
 */
9851static void
9852sfmmu_get_ctx(sfmmu_t *sfmmup)
9853{
9854	mmu_ctx_t *mmu_ctxp;
9855	uint_t pstate_save;
9856	int ret;
9857
9858	ASSERT(sfmmu_hat_lock_held(sfmmup));
9859	ASSERT(sfmmup != ksfmmup);
9860
9861	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9862		sfmmu_setup_tsbinfo(sfmmup);
9863		SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9864	}
9865
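	/*
	 * Disable preemption so that the CPU, and hence the MMU context
	 * domain looked up below, cannot change underneath us.
	 */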
9866	kpreempt_disable();
9867
9868	mmu_ctxp = CPU_MMU_CTXP(CPU);
9869	ASSERT(mmu_ctxp);
9870	ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9871	ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9872
9873	/*
	 * Do a wrap-around if cnum reaches the maximum number of
	 * contexts supported by this MMU.
9875	 */
9876	if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9877		sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9878
9879	/*
9880	 * Let the MMU set up the page sizes to use for
9881	 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9882	 */
9883	if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9884		mmu_set_ctx_page_sizes(sfmmup);
9885	}
9886
9887	/*
	 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
	 * interrupts disabled to prevent a race condition with wrap-around
	 * ctx invalidation.  On sun4v, ctx invalidation also involves
	 * a HV call to set the number of TSBs to 0.  If interrupts are not
	 * disabled until after sfmmu_load_mmustate is complete, TSBs may
	 * become assigned to INVALID_CONTEXT.  This is not allowed.
9894	 */
9895	pstate_save = sfmmu_disable_intrs();
9896
9897	if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9898	    sfmmup->sfmmu_scdp != NULL) {
9899		sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9900		sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9901		ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9902		/* debug purpose only */
9903		ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9904		    != INVALID_CONTEXT);
9905	}
9906	sfmmu_load_mmustate(sfmmup);
9907
9908	sfmmu_enable_intrs(pstate_save);
9909
9910	kpreempt_enable();
9911}
9912
9913/*
9914 * When all cnums are used up in a MMU, cnum will wrap around to the
9915 * next generation and start from 2.
9916 */
9917static void
9918sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9919{
9920
9921	/* caller must have disabled the preemption */
9922	ASSERT(curthread->t_preempt >= 1);
9923	ASSERT(mmu_ctxp != NULL);
9924
9925	/* acquire Per-MMU (PM) spin lock */
9926	mutex_enter(&mmu_ctxp->mmu_lock);
9927
9928	/* re-check to see if wrap-around is needed */
9929	if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9930		goto done;
9931
9932	SFMMU_MMU_STAT(mmu_wrap_around);
9933
9934	/* update gnum */
9935	ASSERT(mmu_ctxp->mmu_gnum != 0);
9936	mmu_ctxp->mmu_gnum++;
9937	if (mmu_ctxp->mmu_gnum == 0 ||
9938	    mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
		cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bounds.",
9940		    (void *)mmu_ctxp);
9941	}
9942
9943	if (mmu_ctxp->mmu_ncpus > 1) {
9944		cpuset_t cpuset;
9945
9946		membar_enter(); /* make sure updated gnum visible */
9947
9948		SFMMU_XCALL_STATS(NULL);
9949
9950		/* xcall to others on the same MMU to invalidate ctx */
9951		cpuset = mmu_ctxp->mmu_cpuset;
9952		ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9953		CPUSET_DEL(cpuset, CPU->cpu_id);
9954		CPUSET_AND(cpuset, cpu_ready_set);
9955
9956		/*
9957		 * Pass in INVALID_CONTEXT as the first parameter to
9958		 * sfmmu_raise_tsb_exception, which invalidates the context
9959		 * of any process running on the CPUs in the MMU.
9960		 */
9961		xt_some(cpuset, sfmmu_raise_tsb_exception,
9962		    INVALID_CONTEXT, INVALID_CONTEXT);
9963		xt_sync(cpuset);
9964
9965		SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9966	}
9967
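	/*
	 * Invalidate the secondary context on the local CPU as well;
	 * the cross-call above (if any) deliberately excluded this CPU.
	 */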
9968	if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9969		sfmmu_setctx_sec(INVALID_CONTEXT);
9970		sfmmu_clear_utsbinfo();
9971	}
9972
9973	/*
	 * No xcall is needed here.  On sun4u systems all CPUs in a context
	 * domain share a single physical MMU, therefore it's enough to flush
	 * the TLB on the local CPU.  On sun4v systems we use one global
	 * context domain and flush all remote TLBs in the
	 * sfmmu_raise_tsb_exception handler.  Note that vtag_flushall_uctxs()
	 * is called on Ultra II machines, where the equivalent flushall
	 * functionality is implemented in SW, and only user ctx TLB entries
	 * are flushed.
9981	 */
9982	if (&vtag_flushall_uctxs != NULL) {
9983		vtag_flushall_uctxs();
9984	} else {
9985		vtag_flushall();
9986	}
9987
9988	/* reset mmu cnum, skips cnum 0 and 1 */
9989	if (reset_cnum == B_TRUE)
9990		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9991
9992done:
9993	mutex_exit(&mmu_ctxp->mmu_lock);
9994}
9995
9996
9997/*
9998 * For multi-threaded process, set the process context to INVALID_CONTEXT
9999 * so that it faults and reloads the MMU state from TL=0. For single-threaded
10000 * process, we can just load the MMU state directly without having to
10001 * set context invalid. Caller must hold the hat lock since we don't
10002 * acquire it here.
10003 */
10004static void
10005sfmmu_sync_mmustate(sfmmu_t *sfmmup)
10006{
10007	uint_t cnum;
10008	uint_t pstate_save;
10009
10010	ASSERT(sfmmup != ksfmmup);
10011	ASSERT(sfmmu_hat_lock_held(sfmmup));
10012
10013	kpreempt_disable();
10014
10015	/*
	 * We check whether the passed-in sfmmup is the same as the
	 * currently running proc.  This makes sure the current proc
	 * stays single-threaded if it already is.
10019	 */
10020	if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
10021	    (curthread->t_procp->p_lwpcnt == 1)) {
10022		/* single-thread */
10023		cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
10024		if (cnum != INVALID_CONTEXT) {
10025			uint_t curcnum;
10026			/*
			 * Disable interrupts to prevent a race condition
			 * with sfmmu_ctx_wrap_around ctx invalidation.
			 * On sun4v, ctx invalidation involves setting
			 * the TSB to NULL, hence interrupts should be
			 * disabled until after sfmmu_load_mmustate is
			 * completed.
10032			 */
10033			pstate_save = sfmmu_disable_intrs();
10034			curcnum = sfmmu_getctx_sec();
10035			if (curcnum == cnum)
10036				sfmmu_load_mmustate(sfmmup);
10037			sfmmu_enable_intrs(pstate_save);
10038			ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
10039		}
10040	} else {
		/*
		 * Multi-threaded, or sfmmup is not the same as curproc:
		 * invalidate the context so the MMU state is reloaded
		 * from the TL=0 fault path.
		 */
10045		sfmmu_invalidate_ctx(sfmmup);
10046	}
10047
10048	kpreempt_enable();
10049}
10050
10051
10052/*
10053 * Replace the specified TSB with a new TSB.  This function gets called when
10054 * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
10055 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
10056 * (8K).
10057 *
10058 * Caller must hold the HAT lock, but should assume any tsb_info
10059 * pointers it has are no longer valid after calling this function.
10060 *
10061 * Return values:
10062 *	TSB_ALLOCFAIL	Failed to allocate a TSB, due to memory constraints
10063 *	TSB_LOSTRACE	HAT is busy, i.e. another thread is already doing
10064 *			something to this tsbinfo/TSB
10065 *	TSB_SUCCESS	Operation succeeded
10066 */
10067static tsb_replace_rc_t
10068sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
10069    hatlock_t *hatlockp, uint_t flags)
10070{
10071	struct tsb_info *new_tsbinfo = NULL;
10072	struct tsb_info *curtsb, *prevtsb;
10073	uint_t tte_sz_mask;
10074	int i;
10075
10076	ASSERT(sfmmup != ksfmmup);
10077	ASSERT(sfmmup->sfmmu_ismhat == 0);
10078	ASSERT(sfmmu_hat_lock_held(sfmmup));
10079	ASSERT(szc <= tsb_max_growsize);
10080
10081	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
10082		return (TSB_LOSTRACE);
10083
10084	/*
10085	 * Find the tsb_info ahead of this one in the list, and
10086	 * also make sure that the tsb_info passed in really
10087	 * exists!
10088	 */
10089	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
10090	    curtsb != old_tsbinfo && curtsb != NULL;
10091	    prevtsb = curtsb, curtsb = curtsb->tsb_next)
10092		;
10093	ASSERT(curtsb != NULL);
10094
10095	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10096		/*
10097		 * The process is swapped out, so just set the new size
10098		 * code.  When it swaps back in, we'll allocate a new one
10099		 * of the new chosen size.
10100		 */
10101		curtsb->tsb_szc = szc;
10102		return (TSB_SUCCESS);
10103	}
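	/*
	 * Mark the hat busy so concurrent TSB operations back off
	 * (returning TSB_LOSTRACE) while we juggle tsb_infos.
	 */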
10104	SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
10105
10106	tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
10107
10108	/*
10109	 * All initialization is done inside of sfmmu_tsbinfo_alloc().
10110	 * If we fail to allocate a TSB, exit.
10111	 *
	 * If the TSB is growing to a size > 4M from a size < 4M, retry
	 * with a 4M TSB after the initial allocation fails.
	 *
	 * If swapping in a TSB of size > 4M, retry with a 4M TSB after
	 * the initial allocation fails.
10117	 */
10118	sfmmu_hat_exit(hatlockp);
10119	if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
10120	    tte_sz_mask, flags, sfmmup) &&
10121	    (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
10122	    (!(flags & TSB_SWAPIN) &&
10123	    (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
10124	    sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
10125	    tte_sz_mask, flags, sfmmup))) {
10126		(void) sfmmu_hat_enter(sfmmup);
10127		if (!(flags & TSB_SWAPIN))
10128			SFMMU_STAT(sf_tsb_resize_failures);
10129		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10130		return (TSB_ALLOCFAIL);
10131	}
10132	(void) sfmmu_hat_enter(sfmmup);
10133
10134	/*
10135	 * Re-check to make sure somebody else didn't muck with us while we
10136	 * didn't hold the HAT lock.  If the process swapped out, fine, just
10137	 * exit; this can happen if we try to shrink the TSB from the context
10138	 * of another process (such as on an ISM unmap), though it is rare.
10139	 */
10140	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10141		SFMMU_STAT(sf_tsb_resize_failures);
10142		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10143		sfmmu_hat_exit(hatlockp);
10144		sfmmu_tsbinfo_free(new_tsbinfo);
10145		(void) sfmmu_hat_enter(sfmmup);
10146		return (TSB_LOSTRACE);
10147	}
10148
10149#ifdef	DEBUG
10150	/* Reverify that the tsb_info still exists.. for debugging only */
10151	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
10152	    curtsb != old_tsbinfo && curtsb != NULL;
10153	    prevtsb = curtsb, curtsb = curtsb->tsb_next)
10154		;
10155	ASSERT(curtsb != NULL);
10156#endif	/* DEBUG */
10157
10158	/*
10159	 * Quiesce any CPUs running this process on their next TLB miss
10160	 * so they atomically see the new tsb_info.  We temporarily set the
10161	 * context to invalid context so new threads that come on processor
10162	 * after we do the xcall to cpusran will also serialize behind the
10163	 * HAT lock on TLB miss and will see the new TSB.  Since this short
10164	 * race with a new thread coming on processor is relatively rare,
10165	 * this synchronization mechanism should be cheaper than always
10166	 * pausing all CPUs for the duration of the setup, which is what
	 * the old implementation did.  This is particularly true if we are
10168	 * copying a huge chunk of memory around during that window.
10169	 *
10170	 * The memory barriers are to make sure things stay consistent
10171	 * with resume() since it does not hold the HAT lock while
10172	 * walking the list of tsb_info structures.
10173	 */
10174	if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
10175		/* The TSB is either growing or shrinking. */
10176		sfmmu_invalidate_ctx(sfmmup);
10177	} else {
10178		/*
10179		 * It is illegal to swap in TSBs from a process other
10180		 * than a process being swapped in.  This in turn
10181		 * implies we do not have a valid MMU context here
10182		 * since a process needs one to resolve translation
10183		 * misses.
10184		 */
10185		ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
10186	}
10187
10188#ifdef DEBUG
10189	ASSERT(max_mmu_ctxdoms > 0);
10190
10191	/*
10192	 * Process should have INVALID_CONTEXT on all MMUs
10193	 */
10194	for (i = 0; i < max_mmu_ctxdoms; i++) {
10195
10196		ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
10197	}
10198#endif
10199
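	/*
	 * Splice new_tsbinfo into the list in place of old_tsbinfo.
	 */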
10200	new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
10201	membar_stst();	/* strict ordering required */
10202	if (prevtsb)
10203		prevtsb->tsb_next = new_tsbinfo;
10204	else
10205		sfmmup->sfmmu_tsb = new_tsbinfo;
10206	membar_enter();	/* make sure new TSB globally visible */
10207
10208	/*
10209	 * We need to migrate TSB entries from the old TSB to the new TSB
10210	 * if tsb_remap_ttes is set and the TSB is growing.
10211	 */
10212	if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
10213		sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
10214
10215	SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10216
10217	/*
10218	 * Drop the HAT lock to free our old tsb_info.
10219	 */
10220	sfmmu_hat_exit(hatlockp);
10221
10222	if ((flags & TSB_GROW) == TSB_GROW) {
10223		SFMMU_STAT(sf_tsb_grow);
10224	} else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
10225		SFMMU_STAT(sf_tsb_shrink);
10226	}
10227
10228	sfmmu_tsbinfo_free(old_tsbinfo);
10229
10230	(void) sfmmu_hat_enter(sfmmup);
10231	return (TSB_SUCCESS);
10232}
10233
10234/*
10235 * This function will re-program hat pgsz array, and invalidate the
10236 * process' context, forcing the process to switch to another
10237 * context on the next TLB miss, and therefore start using the
10238 * TLB that is reprogrammed for the new page sizes.
10239 */
10240void
10241sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
10242{
10243	int i;
10244	hatlock_t *hatlockp = NULL;
10245
10246	hatlockp = sfmmu_hat_enter(sfmmup);
10247	/* USIII+-IV+ optimization, requires hat lock */
10248	if (tmp_pgsz) {
10249		for (i = 0; i < mmu_page_sizes; i++)
10250			sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
10251	}
10252	SFMMU_STAT(sf_tlb_reprog_pgsz);
10253
10254	sfmmu_invalidate_ctx(sfmmup);
10255
10256	sfmmu_hat_exit(hatlockp);
10257}
10258
10259/*
10260 * The scd_rttecnt field in the SCD must be updated to take account of the
10261 * regions which it contains.
10262 */
10263static void
10264sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
10265{
10266	uint_t rid;
10267	uint_t i, j;
10268	ulong_t w;
10269	sf_region_t *rgnp;
10270
10271	ASSERT(srdp != NULL);
10272
10273	for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
10274		if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
10275			continue;
10276		}
10277
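		/*
		 * Walk the set bits of this bitmap word; each set bit
		 * identifies one region id belonging to the SCD.
		 */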
10278		j = 0;
10279		while (w) {
10280			if (!(w & 0x1)) {
10281				j++;
10282				w >>= 1;
10283				continue;
10284			}
10285			rid = (i << BT_ULSHIFT) | j;
10286			j++;
10287			w >>= 1;
10288
10289			ASSERT(SFMMU_IS_SHMERID_VALID(rid));
10290			ASSERT(rid < SFMMU_MAX_HME_REGIONS);
10291			rgnp = srdp->srd_hmergnp[rid];
10292			ASSERT(rgnp->rgn_refcnt > 0);
10293			ASSERT(rgnp->rgn_id == rid);
10294
10295			scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
10296			    rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
10297
10298			/*
10299			 * Maintain the tsb0 inflation cnt for the regions
10300			 * in the SCD.
10301			 */
10302			if (rgnp->rgn_pgszc >= TTE4M) {
10303				scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
10304				    rgnp->rgn_size >>
10305				    (TTE_PAGE_SHIFT(TTE8K) + 2);
10306			}
10307		}
10308	}
10309}
10310
10311/*
10312 * This function assumes that there are either four or six supported page
10313 * sizes and at most two programmable TLBs, so we need to decide which
10314 * page sizes are most important and then tell the MMU layer so it
10315 * can adjust the TLB page sizes accordingly (if supported).
10316 *
10317 * If these assumptions change, this function will need to be
10318 * updated to support whatever the new limits are.
10319 *
10320 * The growing flag is nonzero if we are growing the address space,
10321 * and zero if it is shrinking.  This allows us to decide whether
10322 * to grow or shrink our TSB, depending upon available memory
10323 * conditions.
10324 */
10325static void
10326sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
10327{
10328	uint64_t ttecnt[MMU_PAGE_SIZES];
10329	uint64_t tte8k_cnt, tte4m_cnt;
10330	uint8_t i;
10331	int sectsb_thresh;
10332
10333	/*
10334	 * Kernel threads, processes with small address spaces not using
10335	 * large pages, and dummy ISM HATs need not apply.
10336	 */
	if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != 0)
10338		return;
10339
10340	if (!SFMMU_LGPGS_INUSE(sfmmup) &&
10341	    sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
10342		return;
10343
10344	for (i = 0; i < mmu_page_sizes; i++) {
10345		ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
10346		    sfmmup->sfmmu_ismttecnt[i];
10347	}
10348
10349	/* Check pagesizes in use, and possibly reprogram DTLB. */
10350	if (&mmu_check_page_sizes)
10351		mmu_check_page_sizes(sfmmup, ttecnt);
10352
10353	/*
10354	 * Calculate the number of 8k ttes to represent the span of these
10355	 * pages.
10356	 */
10357	tte8k_cnt = ttecnt[TTE8K] +
10358	    (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
10359	    (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
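	/*
	 * Likewise express any 32M/256M mappings in units of 4M pages
	 * when sizing the second TSB.
	 */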
10360	if (mmu_page_sizes == max_mmu_page_sizes) {
10361		tte4m_cnt = ttecnt[TTE4M] +
10362		    (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
10363		    (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
10364	} else {
10365		tte4m_cnt = ttecnt[TTE4M];
10366	}
10367
10368	/*
10369	 * Inflate tte8k_cnt to allow for region large page allocation failure.
10370	 */
10371	tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
10372
10373	/*
10374	 * Inflate TSB sizes by a factor of 2 if this process
10375	 * uses 4M text pages to minimize extra conflict misses
10376	 * in the first TSB since without counting text pages
10377	 * 8K TSB may become too small.
10378	 *
10379	 * Also double the size of the second TSB to minimize
10380	 * extra conflict misses due to competition between 4M text pages
10381	 * and data pages.
10382	 *
10383	 * We need to adjust the second TSB allocation threshold by the
10384	 * inflation factor, since there is no point in creating a second
10385	 * TSB when we know all the mappings can fit in the I/D TLBs.
10386	 */
10387	sectsb_thresh = tsb_sectsb_threshold;
10388	if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10389		tte8k_cnt <<= 1;
10390		tte4m_cnt <<= 1;
10391		sectsb_thresh <<= 1;
10392	}
10393
10394	/*
10395	 * Check to see if our TSB is the right size; we may need to
10396	 * grow or shrink it.  If the process is small, our work is
10397	 * finished at this point.
10398	 */
10399	if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10400		return;
10401	}
10402	sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10403}
10404
10405static void
10406sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10407	uint64_t tte4m_cnt, int sectsb_thresh)
10408{
10409	int tsb_bits;
10410	uint_t tsb_szc;
10411	struct tsb_info *tsbinfop;
10412	hatlock_t *hatlockp = NULL;
10413
10414	hatlockp = sfmmu_hat_enter(sfmmup);
10415	ASSERT(hatlockp != NULL);
10416	tsbinfop = sfmmup->sfmmu_tsb;
10417	ASSERT(tsbinfop != NULL);
10418
10419	/*
10420	 * If we're growing, select the size based on RSS.  If we're
10421	 * shrinking, leave some room so we don't have to turn around and
10422	 * grow again immediately.
10423	 */
10424	if (growing)
10425		tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10426	else
10427		tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10428
10429	if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10430	    (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10431		(void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10432		    hatlockp, TSB_SHRINK);
10433	} else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10434		(void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10435		    hatlockp, TSB_GROW);
10436	}
10437	tsbinfop = sfmmup->sfmmu_tsb;
10438
10439	/*
10440	 * With the TLB and first TSB out of the way, we need to see if
10441	 * we need a second TSB for 4M pages.  If we managed to reprogram
10442	 * the TLB page sizes above, the process will start using this new
10443	 * TSB right away; otherwise, it will start using it on the next
10444	 * context switch.  Either way, it's no big deal so there's no
10445	 * synchronization with the trap handlers here unless we grow the
10446	 * TSB (in which case it's required to prevent using the old one
10447	 * after it's freed). Note: second tsb is required for 32M/256M
10448	 * page sizes.
10449	 */
10450	if (tte4m_cnt > sectsb_thresh) {
10451		/*
10452		 * If we're growing, select the size based on RSS.  If we're
10453		 * shrinking, leave some room so we don't have to turn
10454		 * around and grow again immediately.
10455		 */
10456		if (growing)
10457			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10458		else
10459			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10460		if (tsbinfop->tsb_next == NULL) {
10461			struct tsb_info *newtsb;
10462			int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10463			    0 : TSB_ALLOC;
10464
10465			sfmmu_hat_exit(hatlockp);
10466
10467			/*
10468			 * Try to allocate a TSB for 4[32|256]M pages.  If we
10469			 * can't get the size we want, retry w/a minimum sized
10470			 * TSB.  If that still didn't work, give up; we can
10471			 * still run without one.
10472			 */
10473			tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10474			    TSB4M|TSB32M|TSB256M:TSB4M;
10475			if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10476			    allocflags, sfmmup)) &&
10477			    (tsb_szc <= TSB_4M_SZCODE ||
10478			    sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10479			    tsb_bits, allocflags, sfmmup)) &&
10480			    sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10481			    tsb_bits, allocflags, sfmmup)) {
10482				return;
10483			}
10484
10485			hatlockp = sfmmu_hat_enter(sfmmup);
10486
10487			sfmmu_invalidate_ctx(sfmmup);
10488
10489			if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10490				sfmmup->sfmmu_tsb->tsb_next = newtsb;
10491				SFMMU_STAT(sf_tsb_sectsb_create);
10492				sfmmu_hat_exit(hatlockp);
10493				return;
10494			} else {
10495				/*
10496				 * It's annoying, but possible for us
				 * to get here... we dropped the HAT lock
10498				 * because of locking order in the kmem
10499				 * allocator, and while we were off getting
10500				 * our memory, some other thread decided to
10501				 * do us a favor and won the race to get a
10502				 * second TSB for this process.  Sigh.
10503				 */
10504				sfmmu_hat_exit(hatlockp);
10505				sfmmu_tsbinfo_free(newtsb);
10506				return;
10507			}
10508		}
10509
10510		/*
10511		 * We have a second TSB, see if it's big enough.
10512		 */
10513		tsbinfop = tsbinfop->tsb_next;
10514
10515		/*
10516		 * Check to see if our second TSB is the right size;
10517		 * we may need to grow or shrink it.
10518		 * To prevent thrashing (e.g. growing the TSB on a
10519		 * subsequent map operation), only try to shrink if
10520		 * the TSB reach exceeds twice the virtual address
10521		 * space size.
10522		 */
10523		if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10524		    (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10525			(void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10526			    tsb_szc, hatlockp, TSB_SHRINK);
10527		} else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10528		    TSB_OK_GROW()) {
10529			(void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10530			    tsb_szc, hatlockp, TSB_GROW);
10531		}
10532	}
10533
10534	sfmmu_hat_exit(hatlockp);
10535}
10536
10537/*
10538 * Free up a sfmmu
10539 * Since the sfmmu is currently embedded in the hat struct we simply zero
10540 * out our fields and free up the ism map blk list if any.
10541 */
10542static void
10543sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10544{
10545	ism_blk_t	*blkp, *nx_blkp;
10546#ifdef	DEBUG
10547	ism_map_t	*map;
10548	int 		i;
10549#endif
10550
10551	ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10552	ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10553	ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10554	ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10555	ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10556	ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10557	ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10558
10559	sfmmup->sfmmu_free = 0;
10560	sfmmup->sfmmu_ismhat = 0;
10561
10562	blkp = sfmmup->sfmmu_iblk;
10563	sfmmup->sfmmu_iblk = NULL;
10564
10565	while (blkp) {
10566#ifdef	DEBUG
10567		map = blkp->iblk_maps;
10568		for (i = 0; i < ISM_MAP_SLOTS; i++) {
10569			ASSERT(map[i].imap_seg == 0);
10570			ASSERT(map[i].imap_ismhat == NULL);
10571			ASSERT(map[i].imap_ment == NULL);
10572		}
10573#endif
10574		nx_blkp = blkp->iblk_next;
10575		blkp->iblk_next = NULL;
10576		blkp->iblk_nextpa = (uint64_t)-1;
10577		kmem_cache_free(ism_blk_cache, blkp);
10578		blkp = nx_blkp;
10579	}
10580}
10581
10582/*
 * Locking primitives accessed by HATLOCK macros
10584 */
10585
10586#define	SFMMU_SPL_MTX	(0x0)
10587#define	SFMMU_ML_MTX	(0x1)
10588
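/*
 * Select the backing lock array by type: the page struct lock (spl)
 * hash for SFMMU_SPL_MTX, the mapping list (mlist) hash for
 * SFMMU_ML_MTX.
 */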
10589#define	SFMMU_MLSPL_MTX(type, pg)	(((type) == SFMMU_SPL_MTX) ? \
10590					    SPL_HASH(pg) : MLIST_HASH(pg))
10591
10592kmutex_t *
10593sfmmu_page_enter(struct page *pp)
10594{
10595	return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10596}
10597
10598void
10599sfmmu_page_exit(kmutex_t *spl)
10600{
10601	mutex_exit(spl);
10602}
10603
10604int
10605sfmmu_page_spl_held(struct page *pp)
10606{
10607	return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10608}
10609
10610kmutex_t *
10611sfmmu_mlist_enter(struct page *pp)
10612{
10613	return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10614}
10615
10616void
10617sfmmu_mlist_exit(kmutex_t *mml)
10618{
10619	mutex_exit(mml);
10620}
10621
10622int
10623sfmmu_mlist_held(struct page *pp)
10624{
10625
10626	return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10627}
10628
10629/*
10630 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter().  For
10631 * sfmmu_mlist_enter() case mml_table lock array is used and for
10632 * sfmmu_page_enter() sfmmu_page_lock lock array is used.
10633 *
10634 * The lock is taken on a root page so that it protects an operation on all
10635 * constituent pages of a large page pp belongs to.
10636 *
10637 * The routine takes a lock from the appropriate array. The lock is determined
10638 * by hashing the root page. After taking the lock this routine checks if the
 * root page has the same size code that was used to determine the root (i.e.,
 * that the root hasn't changed).  If the root page has the expected p_szc we
10641 * have the right lock and it's returned to the caller. If root's p_szc
10642 * decreased we release the lock and retry from the beginning.  This case can
10643 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10644 * value and taking the lock. The number of retries due to p_szc decrease is
10645 * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10646 * determined by hashing pp itself.
10647 *
10648 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10649 * possible that p_szc can increase. To increase p_szc a thread has to lock
10650 * all constituent pages EXCL and do hat_pageunload() on all of them. All the
 * callers that don't hold the page locked recheck whether the hmeblk through
 * which pp was found still maps this pp.  If it no longer maps it, the
 * returned lock is immediately dropped.  Therefore if sfmmu_mlspl_enter()
 * hits the case of
10654 * p_szc increase after taking the lock it returns this lock without further
10655 * retries because in this case the caller doesn't care about which lock was
10656 * taken. The caller will drop it right away.
10657 *
10658 * After the routine returns it's guaranteed that hat_page_demote() can't
10659 * change p_szc field of any of constituent pages of a large page pp belongs
10660 * to as long as pp was either locked at least SHARED prior to this call or
10661 * the caller finds that hment that pointed to this pp still references this
10662 * pp (this also assumes that the caller holds hme hash bucket lock so that
10663 * the same pp can't be remapped into the same hmeblk after it was unmapped by
10664 * hat_pageunload()).
10665 */
10666static kmutex_t *
10667sfmmu_mlspl_enter(struct page *pp, int type)
10668{
10669	kmutex_t	*mtx;
10670	uint_t		prev_rszc = UINT_MAX;
10671	page_t		*rootpp;
10672	uint_t		szc;
10673	uint_t		rszc;
10674	uint_t		pszc = pp->p_szc;
10675
10676	ASSERT(pp != NULL);
10677
10678again:
10679	if (pszc == 0) {
10680		mtx = SFMMU_MLSPL_MTX(type, pp);
10681		mutex_enter(mtx);
10682		return (mtx);
10683	}
10684
10685	/* The lock lives in the root page */
10686	rootpp = PP_GROUPLEADER(pp, pszc);
10687	mtx = SFMMU_MLSPL_MTX(type, rootpp);
10688	mutex_enter(mtx);
10689
10690	/*
10691	 * Return mml in the following 3 cases:
10692	 *
10693	 * 1) If pp itself is root since if its p_szc decreased before we took
10694	 * the lock pp is still the root of smaller szc page. And if its p_szc
10695	 * increased it doesn't matter what lock we return (see comment in
10696	 * front of this routine).
10697	 *
10698	 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10699	 * large page we have the right lock since any previous potential
10700	 * hat_page_demote() is done demoting from greater than current root's
10701	 * p_szc because hat_page_demote() changes root's p_szc last. No
10702	 * further hat_page_demote() can start or be in progress since it
10703	 * would need the same lock we currently hold.
10704	 *
10705	 * 3) If rootpp's p_szc increased since previous iteration it doesn't
10706	 * matter what lock we return (see comment in front of this routine).
10707	 */
10708	if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10709	    rszc >= prev_rszc) {
10710		return (mtx);
10711	}
10712
10713	/*
10714	 * hat_page_demote() could have decreased root's p_szc.
10715	 * In this case pp's p_szc must also be smaller than pszc.
10716	 * Retry.
10717	 */
10718	if (rszc < pszc) {
10719		szc = pp->p_szc;
10720		if (szc < pszc) {
10721			mutex_exit(mtx);
10722			pszc = szc;
10723			goto again;
10724		}
10725		/*
10726		 * pp's p_szc increased after it was decreased.
10727		 * page cannot be mapped. Return current lock. The caller
10728		 * will drop it right away.
10729		 */
10730		return (mtx);
10731	}
10732
10733	/*
10734	 * root's p_szc is greater than pp's p_szc.
10735	 * hat_page_demote() is not done with all pages
10736	 * yet. Wait for it to complete.
10737	 */
10738	mutex_exit(mtx);
10739	rootpp = PP_GROUPLEADER(rootpp, rszc);
10740	mtx = SFMMU_MLSPL_MTX(type, rootpp);
10741	mutex_enter(mtx);
10742	mutex_exit(mtx);
10743	prev_rszc = rszc;
10744	goto again;
10745}
10746
10747static int
10748sfmmu_mlspl_held(struct page *pp, int type)
10749{
10750	kmutex_t	*mtx;
10751
10752	ASSERT(pp != NULL);
10753	/* The lock lives in the root page */
10754	pp = PP_PAGEROOT(pp);
10755	ASSERT(pp != NULL);
10756
10757	mtx = SFMMU_MLSPL_MTX(type, pp);
10758	return (MUTEX_HELD(mtx));
10759}
10760
10761static uint_t
10762sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10763{
10764	struct  hme_blk *hblkp;
10765
10766
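	/*
	 * Check-lock-check: test freehblkp without the lock first so we
	 * avoid taking the lock when the free list is obviously empty.
	 */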
10767	if (freehblkp != NULL) {
10768		mutex_enter(&freehblkp_lock);
10769		if (freehblkp != NULL) {
			/*
			 * If the current thread owns hblk_reserve, or this
			 * is a critical request from sfmmu_hblk_steal(),
			 * let it succeed even if freehblkcnt is really low.
			 */
10775			if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10776				SFMMU_STAT(sf_get_free_throttle);
10777				mutex_exit(&freehblkp_lock);
10778				return (0);
10779			}
10780			freehblkcnt--;
10781			*hmeblkpp = freehblkp;
10782			hblkp = *hmeblkpp;
10783			freehblkp = hblkp->hblk_next;
10784			mutex_exit(&freehblkp_lock);
10785			hblkp->hblk_next = NULL;
10786			SFMMU_STAT(sf_get_free_success);
10787
10788			ASSERT(hblkp->hblk_hmecnt == 0);
10789			ASSERT(hblkp->hblk_vcnt == 0);
10790			ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10791
10792			return (1);
10793		}
10794		mutex_exit(&freehblkp_lock);
10795	}
10796
10797	/* Check cpu hblk pending queues */
10798	if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10799		hblkp = *hmeblkpp;
10800		hblkp->hblk_next = NULL;
10801		hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10802
10803		ASSERT(hblkp->hblk_hmecnt == 0);
10804		ASSERT(hblkp->hblk_vcnt == 0);
10805
10806		return (1);
10807	}
10808
10809	SFMMU_STAT(sf_get_free_fail);
10810	return (0);
10811}
10812
10813static uint_t
10814sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10815{
10816	struct  hme_blk *hblkp;
10817
10818	ASSERT(hmeblkp->hblk_hmecnt == 0);
10819	ASSERT(hmeblkp->hblk_vcnt == 0);
10820	ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10821
10822	/*
	 * If the current thread is mapping into kernel space,
	 * let it succeed even if freehblkcnt is at its max,
	 * so that it avoids freeing the hblk to kmem.
	 * This prevents stack overflow due to
	 * possible recursion, since kmem_cache_free()
	 * might require creation of a slab which
	 * in turn needs an hmeblk to map that slab;
	 * let's break this vicious chain at the first
	 * opportunity.
10832	 */
10833	if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10834		mutex_enter(&freehblkp_lock);
10835		if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10836			SFMMU_STAT(sf_put_free_success);
10837			freehblkcnt++;
10838			hmeblkp->hblk_next = freehblkp;
10839			freehblkp = hmeblkp;
10840			mutex_exit(&freehblkp_lock);
10841			return (1);
10842		}
10843		mutex_exit(&freehblkp_lock);
10844	}
10845
10846	/*
10847	 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10848	 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10849	 * we are not in the process of mapping into kernel space.
10850	 */
10851	ASSERT(!critical);
10852	while (freehblkcnt > HBLK_RESERVE_CNT) {
10853		mutex_enter(&freehblkp_lock);
10854		if (freehblkcnt > HBLK_RESERVE_CNT) {
10855			freehblkcnt--;
10856			hblkp = freehblkp;
10857			freehblkp = hblkp->hblk_next;
10858			mutex_exit(&freehblkp_lock);
10859			ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10860			kmem_cache_free(sfmmu8_cache, hblkp);
10861			continue;
10862		}
10863		mutex_exit(&freehblkp_lock);
10864	}
10865	SFMMU_STAT(sf_put_free_fail);
10866	return (0);
10867}
10868
10869static void
10870sfmmu_hblk_swap(struct hme_blk *new)
10871{
10872	struct hme_blk *old, *hblkp, *prev;
10873	uint64_t newpa;
10874	caddr_t	base, vaddr, endaddr;
10875	struct hmehash_bucket *hmebp;
10876	struct sf_hment *osfhme, *nsfhme;
10877	page_t *pp;
10878	kmutex_t *pml;
10879	tte_t tte;
10880	struct hme_blk *list = NULL;
10881
10882#ifdef	DEBUG
10883	hmeblk_tag		hblktag;
10884	struct hme_blk		*found;
10885#endif
10886	old = HBLK_RESERVE;
10887	ASSERT(!old->hblk_shared);
10888
10889	/*
10890	 * save pa before bcopy clobbers it
10891	 */
10892	newpa = new->hblk_nextpa;
10893
10894	base = (caddr_t)get_hblk_base(old);
10895	endaddr = base + get_hblk_span(old);
10896
10897	/*
10898	 * acquire hash bucket lock.
10899	 */
10900	hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10901	    SFMMU_INVALID_SHMERID);
10902
10903	/*
10904	 * copy contents from old to new
10905	 */
10906	bcopy((void *)old, (void *)new, HME8BLK_SZ);
10907
10908	/*
10909	 * add new to hash chain
10910	 */
10911	sfmmu_hblk_hash_add(hmebp, new, newpa);
10912
10913	/*
10914	 * search hash chain for hblk_reserve; this needs to be performed
10915	 * after adding new, otherwise prev won't correspond to the hblk which
10916	 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10917	 * remove old later.
10918	 */
10919	for (prev = NULL,
10920	    hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10921	    prev = hblkp, hblkp = hblkp->hblk_next)
10922		;
10923
10924	if (hblkp != old)
10925		panic("sfmmu_hblk_swap: hblk_reserve not found");
10926
10927	/*
10928	 * p_mapping list is still pointing to hments in hblk_reserve;
10929	 * fix up p_mapping list so that they point to hments in new.
10930	 *
10931	 * Since all these mappings are created by hblk_reserve_thread
10932	 * on the way and it's using at least one of the buffers from each of
10933	 * the newly minted slabs, there is no danger of any of these
10934	 * mappings getting unloaded by another thread.
10935	 *
10936	 * tsbmiss could only modify ref/mod bits of hments in old/new.
10937	 * Since all of these hments hold mappings established by segkmem
10938	 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
10939	 * have no meaning for the mappings in hblk_reserve.  hments in
10940	 * old and new are identical except for ref/mod bits.
10941	 */
10942	for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10943
10944		HBLKTOHME(osfhme, old, vaddr);
10945		sfmmu_copytte(&osfhme->hme_tte, &tte);
10946
10947		if (TTE_IS_VALID(&tte)) {
10948			if ((pp = osfhme->hme_page) == NULL)
10949				panic("sfmmu_hblk_swap: page not mapped");
10950
10951			pml = sfmmu_mlist_enter(pp);
10952
10953			if (pp != osfhme->hme_page)
10954				panic("sfmmu_hblk_swap: mapping changed");
10955
10956			HBLKTOHME(nsfhme, new, vaddr);
10957
10958			HME_ADD(nsfhme, pp);
10959			HME_SUB(osfhme, pp);
10960
10961			sfmmu_mlist_exit(pml);
10962		}
10963	}
10964
10965	/*
10966	 * remove old from hash chain
10967	 */
10968	sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10969
10970#ifdef	DEBUG
10971
10972	hblktag.htag_id = ksfmmup;
10973	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10974	hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10975	hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10976	HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10977
10978	if (found != new)
10979		panic("sfmmu_hblk_swap: new hblk not found");
10980#endif
10981
10982	SFMMU_HASH_UNLOCK(hmebp);
10983
10984	/*
10985	 * Reset hblk_reserve
10986	 */
10987	bzero((void *)old, HME8BLK_SZ);
10988	old->hblk_nextpa = va_to_pa((caddr_t)old);
10989}
10990
10991/*
10992 * Grab the mlist mutex for both pages passed in.
10993 *
10994 * low and high will be returned as pointers to the mutexes for these pages.
10995 * low refers to the mutex residing in the lower bin of the mlist hash, while
10996 * high refers to the mutex residing in the higher bin of the mlist hash.  This
10997 * is due to the locking order restrictions on the same thread grabbing
10998 * multiple mlist mutexes.  The low lock must be acquired before the high lock.
10999 *
 * If both pages hash to the same mutex, only that single mutex is grabbed,
 * and high is returned as NULL.
11002 * If the pages hash to different bins in the hash, grab the lower addressed
11003 * lock first and then the higher addressed lock in order to follow the locking
11004 * rules involved with the same thread grabbing multiple mlist mutexes.
11005 * low and high will both have non-NULL values.
11006 */
11007static void
11008sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
11009    kmutex_t **low, kmutex_t **high)
11010{
11011	kmutex_t	*mml_targ, *mml_repl;
11012
11013	/*
11014	 * no need to do the dance around szc as in sfmmu_mlist_enter()
11015	 * because this routine is only called by hat_page_relocate() and all
11016	 * targ and repl pages are already locked EXCL so szc can't change.
11017	 */
11018
11019	mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
11020	mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
11021
11022	if (mml_targ == mml_repl) {
11023		*low = mml_targ;
11024		*high = NULL;
11025	} else {
11026		if (mml_targ < mml_repl) {
11027			*low = mml_targ;
11028			*high = mml_repl;
11029		} else {
11030			*low = mml_repl;
11031			*high = mml_targ;
11032		}
11033	}
11034
11035	mutex_enter(*low);
11036	if (*high)
11037		mutex_enter(*high);
11038}
11039
11040static void
11041sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
11042{
11043	if (high)
11044		mutex_exit(high);
11045	mutex_exit(low);
11046}
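
/*
 * Typical pairing (illustrative sketch): a caller in hat_page_relocate()
 * would do
 *
 *	sfmmu_mlist_reloc_enter(targ, repl, &low, &high);
 *	... operate on the mapping lists of both pages ...
 *	sfmmu_mlist_reloc_exit(low, high);
 *
 * so the high lock is always released before the low lock.
 */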
11047
11048static hatlock_t *
11049sfmmu_hat_enter(sfmmu_t *sfmmup)
11050{
11051	hatlock_t	*hatlockp;
11052
11053	if (sfmmup != ksfmmup) {
11054		hatlockp = TSB_HASH(sfmmup);
11055		mutex_enter(HATLOCK_MUTEXP(hatlockp));
11056		return (hatlockp);
11057	}
11058	return (NULL);
11059}
11060
11061static hatlock_t *
11062sfmmu_hat_tryenter(sfmmu_t *sfmmup)
11063{
11064	hatlock_t	*hatlockp;
11065
11066	if (sfmmup != ksfmmup) {
11067		hatlockp = TSB_HASH(sfmmup);
11068		if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
11069			return (NULL);
11070		return (hatlockp);
11071	}
11072	return (NULL);
11073}
11074
11075static void
11076sfmmu_hat_exit(hatlock_t *hatlockp)
11077{
11078	if (hatlockp != NULL)
11079		mutex_exit(HATLOCK_MUTEXP(hatlockp));
11080}
11081
11082static void
11083sfmmu_hat_lock_all(void)
11084{
11085	int i;
11086	for (i = 0; i < SFMMU_NUM_LOCK; i++)
11087		mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
11088}
11089
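/*
 * Drop the hat locks in the reverse order of sfmmu_hat_lock_all();
 * it is the consistent acquisition order above that prevents deadlock.
 */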
11090static void
11091sfmmu_hat_unlock_all(void)
11092{
11093	int i;
11094	for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
11095		mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
11096}
11097
11098int
11099sfmmu_hat_lock_held(sfmmu_t *sfmmup)
11100{
11101	ASSERT(sfmmup != ksfmmup);
11102	return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
11103}
11104
11105/*
11106 * Locking primitives to provide consistency between ISM unmap
11107 * and other operations.  Since ISM unmap can take a long time, we
11108 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
11109 * contention on the hatlock buckets while ISM segments are being
11110 * unmapped.  The tradeoff is that the flags don't prevent priority
11111 * inversion from occurring, so we must request kernel priority in
11112 * case we have to sleep to keep from getting buried while holding
11113 * the HAT_ISMBUSY flag set, which in turn could block other kernel
11114 * threads from running (for example, in sfmmu_uvatopfn()).
11115 */
11116static void
11117sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
11118{
11119	hatlock_t *hatlockp;
11120
11121	THREAD_KPRI_REQUEST();
11122	if (!hatlock_held)
11123		hatlockp = sfmmu_hat_enter(sfmmup);
11124	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
11125		cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11126	SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
11127	if (!hatlock_held)
11128		sfmmu_hat_exit(hatlockp);
11129}
11130
11131static void
11132sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
11133{
11134	hatlock_t *hatlockp;
11135
11136	if (!hatlock_held)
11137		hatlockp = sfmmu_hat_enter(sfmmup);
11138	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
11139	SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
11140	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11141	if (!hatlock_held)
11142		sfmmu_hat_exit(hatlockp);
11143	THREAD_KPRI_RELEASE();
11144}
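
/*
 * For illustration, an ISM unmap path brackets its work with the
 * primitives above; while HAT_ISMBUSY is set, other threads entering
 * sfmmu_ismhat_enter() sleep on sfmmu_tsb_cv instead of contending on
 * the hatlock buckets:
 *
 *	sfmmu_ismhat_enter(sfmmup, 0);
 *	... perform the (potentially long-running) ISM update ...
 *	sfmmu_ismhat_exit(sfmmup, 0);
 */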
11145
11146/*
11147 *
11148 * Algorithm:
11149 *
11150 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
11151 *	hblks.
11152 *
11153 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
11154 *
 *		(a) try to return an hblk from reserve pool of free hblks;
11156 *		(b) if the reserve pool is empty, acquire hblk_reserve_lock
11157 *		    and return hblk_reserve.
11158 *
11159 * (3) call kmem_cache_alloc() to allocate hblk;
11160 *
11161 *		(a) if hblk_reserve_lock is held by the current thread,
11162 *		    atomically replace hblk_reserve by the hblk that is
11163 *		    returned by kmem_cache_alloc; release hblk_reserve_lock
11164 *		    and call kmem_cache_alloc() again.
11165 *		(b) if reserve pool is not full, add the hblk that is
11166 *		    returned by kmem_cache_alloc to reserve pool and
11167 *		    call kmem_cache_alloc again.
11168 *
11169 */
11170static struct hme_blk *
11171sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
11172	struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
11173	uint_t flags, uint_t rid)
11174{
11175	struct hme_blk *hmeblkp = NULL;
11176	struct hme_blk *newhblkp;
11177	struct hme_blk *shw_hblkp = NULL;
11178	struct kmem_cache *sfmmu_cache = NULL;
11179	uint64_t hblkpa;
11180	ulong_t index;
11181	uint_t owner;		/* set to 1 if using hblk_reserve */
11182	uint_t forcefree;
11183	int sleep;
11184	sf_srd_t *srdp;
11185	sf_region_t *rgnp;
11186
11187	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11188	ASSERT(hblktag.htag_rid == rid);
11189	SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
11190	ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11191	    IS_P2ALIGNED(vaddr, TTEBYTES(size)));
11192
11193	/*
11194	 * If segkmem is not created yet, allocate from static hmeblks
11195	 * created at the end of startup_modules().  See the block comment
11196	 * in startup_modules() describing how we estimate the number of
11197	 * static hmeblks that will be needed during re-map.
11198	 */
11199	if (!hblk_alloc_dynamic) {
11200
11201		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11202
11203		if (size == TTE8K) {
11204			index = nucleus_hblk8.index;
11205			if (index >= nucleus_hblk8.len) {
11206				/*
11207				 * If we panic here, see startup_modules() to
11208				 * make sure that we are calculating the
11209				 * number of hblk8's that we need correctly.
11210				 */
11211				prom_panic("no nucleus hblk8 to allocate");
11212			}
11213			hmeblkp =
11214			    (struct hme_blk *)&nucleus_hblk8.list[index];
11215			nucleus_hblk8.index++;
11216			SFMMU_STAT(sf_hblk8_nalloc);
11217		} else {
11218			index = nucleus_hblk1.index;
11219			if (nucleus_hblk1.index >= nucleus_hblk1.len) {
11220				/*
11221				 * If we panic here, see startup_modules().
11222				 * Most likely you need to update the
11223				 * calculation of the number of hblk1 elements
11224				 * that the kernel needs to boot.
11225				 */
11226				prom_panic("no nucleus hblk1 to allocate");
11227			}
11228			hmeblkp =
11229			    (struct hme_blk *)&nucleus_hblk1.list[index];
11230			nucleus_hblk1.index++;
11231			SFMMU_STAT(sf_hblk1_nalloc);
11232		}
11233
11234		goto hblk_init;
11235	}
11236
11237	SFMMU_HASH_UNLOCK(hmebp);
11238
11239	if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
11240		if (mmu_page_sizes == max_mmu_page_sizes) {
11241			if (size < TTE256M)
11242				shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11243				    size, flags);
11244		} else {
11245			if (size < TTE4M)
11246				shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11247				    size, flags);
11248		}
11249	} else if (SFMMU_IS_SHMERID_VALID(rid)) {
11250		/*
11251		 * Shared hmes use per region bitmaps in rgn_hmeflag
11252		 * rather than shadow hmeblks to keep track of the
11253		 * mapping sizes which have been allocated for the region.
11254		 * Here we cleanup old invalid hmeblks with this rid,
11255		 * which may be left around by pageunload().
11256		 */
11257		int ttesz;
11258		caddr_t va;
11259		caddr_t	eva = vaddr + TTEBYTES(size);
11260
11261		ASSERT(sfmmup != KHATID);
11262
11263		srdp = sfmmup->sfmmu_srdp;
11264		ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11265		rgnp = srdp->srd_hmergnp[rid];
11266		ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
11267		ASSERT(rgnp->rgn_refcnt != 0);
11268		ASSERT(size <= rgnp->rgn_pgszc);
11269
11270		ttesz = HBLK_MIN_TTESZ;
11271		do {
11272			if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
11273				continue;
11274			}
11275
11276			if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
11277				sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
11278			} else if (ttesz < size) {
11279				for (va = vaddr; va < eva;
11280				    va += TTEBYTES(ttesz)) {
11281					sfmmu_cleanup_rhblk(srdp, va, rid,
11282					    ttesz);
11283				}
11284			}
11285		} while (++ttesz <= rgnp->rgn_pgszc);
11286	}
11287
11288fill_hblk:
11289	owner = (hblk_reserve_thread == curthread) ? 1 : 0;
11290
11291	if (owner && size == TTE8K) {
11292
11293		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11294		/*
11295		 * We are really in a tight spot. We already own
11296		 * hblk_reserve and we need another hblk.  In anticipation
11297		 * of this kind of scenario, we specifically set aside
11298		 * HBLK_RESERVE_MIN number of hblks to be used exclusively
11299		 * by owner of hblk_reserve.
11300		 */
11301		SFMMU_STAT(sf_hblk_recurse_cnt);
11302
11303		if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11304			panic("sfmmu_hblk_alloc: reserve list is empty");
11305
11306		goto hblk_verify;
11307	}
11308
11309	ASSERT(!owner);
11310
11311	if ((flags & HAT_NO_KALLOC) == 0) {
11312
11313		sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
11314		sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
11315
11316		if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11317			hmeblkp = sfmmu_hblk_steal(size);
11318		} else {
11319			/*
11320			 * if we are the owner of hblk_reserve,
11321			 * swap hblk_reserve with hmeblkp and
11322			 * start a fresh life.  Hope things go
11323			 * better this time.
11324			 */
11325			if (hblk_reserve_thread == curthread) {
11326				ASSERT(sfmmu_cache == sfmmu8_cache);
11327				sfmmu_hblk_swap(hmeblkp);
11328				hblk_reserve_thread = NULL;
11329				mutex_exit(&hblk_reserve_lock);
11330				goto fill_hblk;
11331			}
11332			/*
11333			 * let's donate this hblk to our reserve list if
11334			 * we are not mapping kernel range
11335			 */
11336			if (size == TTE8K && sfmmup != KHATID) {
11337				if (sfmmu_put_free_hblk(hmeblkp, 0))
11338					goto fill_hblk;
11339			}
11340		}
11341	} else {
11342		/*
11343		 * We are here to map the slab in sfmmu8_cache; let's
11344		 * check if we could tap our reserve list; if successful,
11345		 * this will avoid the pain of going thru sfmmu_hblk_swap
11346		 */
11347		SFMMU_STAT(sf_hblk_slab_cnt);
11348		if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11349			/*
11350			 * let's start hblk_reserve dance
11351			 */
11352			SFMMU_STAT(sf_hblk_reserve_cnt);
11353			owner = 1;
11354			mutex_enter(&hblk_reserve_lock);
11355			hmeblkp = HBLK_RESERVE;
11356			hblk_reserve_thread = curthread;
11357		}
11358	}
11359
11360hblk_verify:
11361	ASSERT(hmeblkp != NULL);
11362	set_hblk_sz(hmeblkp, size);
11363	ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11364	SFMMU_HASH_LOCK(hmebp);
11365	HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11366	if (newhblkp != NULL) {
11367		SFMMU_HASH_UNLOCK(hmebp);
11368		if (hmeblkp != HBLK_RESERVE) {
11369			/*
11370			 * This is really tricky!
11371			 *
11372			 * vmem_alloc(vmem_seg_arena)
11373			 *  vmem_alloc(vmem_internal_arena)
11374			 *   segkmem_alloc(heap_arena)
11375			 *    vmem_alloc(heap_arena)
11376			 *    page_create()
11377			 *    hat_memload()
11378			 *	kmem_cache_free()
11379			 *	 kmem_cache_alloc()
11380			 *	  kmem_slab_create()
11381			 *	   vmem_alloc(kmem_internal_arena)
11382			 *	    segkmem_alloc(heap_arena)
11383			 *		vmem_alloc(heap_arena)
11384			 *		page_create()
11385			 *		hat_memload()
11386			 *		  kmem_cache_free()
11387			 *		...
11388			 *
			 * Thus, hat_memload() could call kmem_cache_free
			 * enough times that we could easily hit the
			 * bottom of the stack or run out of the reserve
			 * list of vmem_seg structs.  So, we must donate
11393			 * this hblk to reserve list if it's allocated
11394			 * from sfmmu8_cache *and* mapping kernel range.
11395			 * We don't need to worry about freeing hmeblk1's
11396			 * to kmem since they don't map any kmem slabs.
11397			 *
11398			 * Note: When segkmem supports largepages, we must
11399			 * free hmeblk1's to reserve list as well.
11400			 */
11401			forcefree = (sfmmup == KHATID) ? 1 : 0;
11402			if (size == TTE8K &&
11403			    sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11404				goto re_verify;
11405			}
11406			ASSERT(sfmmup != KHATID);
11407			kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11408		} else {
11409			/*
11410			 * Hey! we don't need hblk_reserve any more.
11411			 */
11412			ASSERT(owner);
11413			hblk_reserve_thread = NULL;
11414			mutex_exit(&hblk_reserve_lock);
11415			owner = 0;
11416		}
11417re_verify:
11418		/*
11419		 * let's check if the goodies are still present
11420		 */
11421		SFMMU_HASH_LOCK(hmebp);
11422		HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11423		if (newhblkp != NULL) {
11424			/*
11425			 * return newhblkp if it's not hblk_reserve;
11426			 * if newhblkp is hblk_reserve, return it
11427			 * _only if_ we are the owner of hblk_reserve.
11428			 */
11429			if (newhblkp != HBLK_RESERVE || owner) {
11430				ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11431				    newhblkp->hblk_shared);
11432				ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11433				    !newhblkp->hblk_shared);
11434				return (newhblkp);
11435			} else {
11436				/*
11437				 * we just hit hblk_reserve in the hash and
11438				 * we are not the owner of that;
11439				 *
11440				 * block until hblk_reserve_thread completes
11441				 * swapping hblk_reserve and try the dance
11442				 * once again.
11443				 */
11444				SFMMU_HASH_UNLOCK(hmebp);
11445				mutex_enter(&hblk_reserve_lock);
11446				mutex_exit(&hblk_reserve_lock);
11447				SFMMU_STAT(sf_hblk_reserve_hit);
11448				goto fill_hblk;
11449			}
11450		} else {
11451			/*
11452			 * it's no more! try the dance once again.
11453			 */
11454			SFMMU_HASH_UNLOCK(hmebp);
11455			goto fill_hblk;
11456		}
11457	}
11458
11459hblk_init:
11460	if (SFMMU_IS_SHMERID_VALID(rid)) {
11461		uint16_t tteflag = 0x1 <<
11462		    ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11463
11464		if (!(rgnp->rgn_hmeflags & tteflag)) {
11465			atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11466		}
11467		hmeblkp->hblk_shared = 1;
11468	} else {
11469		hmeblkp->hblk_shared = 0;
11470	}
11471	set_hblk_sz(hmeblkp, size);
11472	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11473	hmeblkp->hblk_next = (struct hme_blk *)NULL;
11474	hmeblkp->hblk_tag = hblktag;
11475	hmeblkp->hblk_shadow = shw_hblkp;
11476	hblkpa = hmeblkp->hblk_nextpa;
11477	hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11478
11479	ASSERT(get_hblk_ttesz(hmeblkp) == size);
11480	ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11481	ASSERT(hmeblkp->hblk_hmecnt == 0);
11482	ASSERT(hmeblkp->hblk_vcnt == 0);
11483	ASSERT(hmeblkp->hblk_lckcnt == 0);
11484	ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11485	sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11486	return (hmeblkp);
11487}
11488
11489/*
11490 * This function cleans up the hme_blk and returns it to the free list.
11491 */
11492/* ARGSUSED */
11493static void
11494sfmmu_hblk_free(struct hme_blk **listp)
11495{
11496	struct hme_blk *hmeblkp, *next_hmeblkp;
11497	int		size;
11498	uint_t		critical;
11499	uint64_t	hblkpa;
11500
11501	ASSERT(*listp != NULL);
11502
11503	hmeblkp = *listp;
11504	while (hmeblkp != NULL) {
11505		next_hmeblkp = hmeblkp->hblk_next;
11506		ASSERT(!hmeblkp->hblk_hmecnt);
11507		ASSERT(!hmeblkp->hblk_vcnt);
11508		ASSERT(!hmeblkp->hblk_lckcnt);
11509		ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11510		ASSERT(hmeblkp->hblk_shared == 0);
11511		ASSERT(hmeblkp->hblk_shw_bit == 0);
11512		ASSERT(hmeblkp->hblk_shadow == NULL);
11513
11514		hblkpa = va_to_pa((caddr_t)hmeblkp);
11515		ASSERT(hblkpa != (uint64_t)-1);
11516		critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11517
11518		size = get_hblk_ttesz(hmeblkp);
11519		hmeblkp->hblk_next = NULL;
11520		hmeblkp->hblk_nextpa = hblkpa;
11521
11522		if (hmeblkp->hblk_nuc_bit == 0) {
11523
11524			if (size != TTE8K ||
11525			    !sfmmu_put_free_hblk(hmeblkp, critical))
11526				kmem_cache_free(get_hblk_cache(hmeblkp),
11527				    hmeblkp);
11528		}
11529		hmeblkp = next_hmeblkp;
11530	}
11531}
11532
11533#define	BUCKETS_TO_SEARCH_BEFORE_UNLOAD	30
11534#define	SFMMU_HBLK_STEAL_THRESHOLD 5
11535
11536static uint_t sfmmu_hblk_steal_twice;
11537static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11538
11539/*
11540 * Steal a hmeblk from user or kernel hme hash lists.
 * For an 8K tte, grab one from the reserve pool (freehblkp) before
 * proceeding to steal; if we fail to steal after
 * SFMMU_HBLK_STEAL_THRESHOLD attempts, tap into the critical
 * reserve of freehblkp.
 * Note: We keep looping in this routine until we find one.
11545 */
11546static struct hme_blk *
11547sfmmu_hblk_steal(int size)
11548{
11549	static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11550	struct hmehash_bucket *hmebp;
11551	struct hme_blk *hmeblkp = NULL, *pr_hblk;
11552	uint64_t hblkpa;
11553	int i;
11554	uint_t loop_cnt = 0, critical;
11555
11556	for (;;) {
11557		/* Check cpu hblk pending queues */
11558		if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11559			hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11560			ASSERT(hmeblkp->hblk_hmecnt == 0);
11561			ASSERT(hmeblkp->hblk_vcnt == 0);
11562			return (hmeblkp);
11563		}
11564
11565		if (size == TTE8K) {
11566			critical =
11567			    (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11568			if (sfmmu_get_free_hblk(&hmeblkp, critical))
11569				return (hmeblkp);
11570		}
11571
11572		hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11573		    uhmehash_steal_hand;
11574		ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11575
11576		for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11577		    BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11578			SFMMU_HASH_LOCK(hmebp);
11579			hmeblkp = hmebp->hmeblkp;
11580			hblkpa = hmebp->hmeh_nextpa;
11581			pr_hblk = NULL;
11582			while (hmeblkp) {
11583				/*
11584				 * check if it is a hmeblk that is not locked
11585				 * and not shared. skip shadow hmeblks with
				 * shadow_mask set, i.e. valid count non-zero.
11587				 */
11588				if ((get_hblk_ttesz(hmeblkp) == size) &&
11589				    (hmeblkp->hblk_shw_bit == 0 ||
11590				    hmeblkp->hblk_vcnt == 0) &&
11591				    (hmeblkp->hblk_lckcnt == 0)) {
11592					/*
11593					 * there is a high probability that we
11594					 * will find a free one. search some
11595					 * buckets for a free hmeblk initially
11596					 * before unloading a valid hmeblk.
11597					 */
11598					if ((hmeblkp->hblk_vcnt == 0 &&
11599					    hmeblkp->hblk_hmecnt == 0) || (i >=
11600					    BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11601						if (sfmmu_steal_this_hblk(hmebp,
11602						    hmeblkp, hblkpa, pr_hblk)) {
11603							/*
11604							 * Hblk is unloaded
11605							 * successfully
11606							 */
11607							break;
11608						}
11609					}
11610				}
11611				pr_hblk = hmeblkp;
11612				hblkpa = hmeblkp->hblk_nextpa;
11613				hmeblkp = hmeblkp->hblk_next;
11614			}
11615
11616			SFMMU_HASH_UNLOCK(hmebp);
11617			if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11618				hmebp = uhme_hash;
11619		}
11620		uhmehash_steal_hand = hmebp;
11621
11622		if (hmeblkp != NULL)
11623			break;
11624
11625		/*
11626		 * in the worst case, look for a free one in the kernel
11627		 * hash table.
11628		 */
11629		for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11630			SFMMU_HASH_LOCK(hmebp);
11631			hmeblkp = hmebp->hmeblkp;
11632			hblkpa = hmebp->hmeh_nextpa;
11633			pr_hblk = NULL;
11634			while (hmeblkp) {
11635				/*
				 * check if it is a free hmeblk
11637				 */
11638				if ((get_hblk_ttesz(hmeblkp) == size) &&
11639				    (hmeblkp->hblk_lckcnt == 0) &&
11640				    (hmeblkp->hblk_vcnt == 0) &&
11641				    (hmeblkp->hblk_hmecnt == 0)) {
11642					if (sfmmu_steal_this_hblk(hmebp,
11643					    hmeblkp, hblkpa, pr_hblk)) {
11644						break;
11645					} else {
11646						/*
11647						 * Cannot fail since we have
11648						 * hash lock.
11649						 */
11650						panic("fail to steal?");
11651					}
11652				}
11653
11654				pr_hblk = hmeblkp;
11655				hblkpa = hmeblkp->hblk_nextpa;
11656				hmeblkp = hmeblkp->hblk_next;
11657			}
11658
11659			SFMMU_HASH_UNLOCK(hmebp);
11660			if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11661				hmebp = khme_hash;
11662		}
11663
11664		if (hmeblkp != NULL)
11665			break;
11666		sfmmu_hblk_steal_twice++;
11667	}
11668	return (hmeblkp);
11669}
11670
11671/*
11672 * This routine does real work to prepare a hblk to be "stolen" by
 * unloading the mappings, updating shadow counts, etc.
 * It returns 1 if the block is ready to be reused (stolen), or 0 if
 * the block cannot be stolen yet; pageunload is still working on
 * this hblk.
11677 */
11678static int
11679sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11680	uint64_t hblkpa, struct hme_blk *pr_hblk)
11681{
11682	int shw_size, vshift;
11683	struct hme_blk *shw_hblkp;
11684	caddr_t vaddr;
11685	uint_t shw_mask, newshw_mask;
11686	struct hme_blk *list = NULL;
11687
11688	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11689
11690	/*
11691	 * check if the hmeblk is free, unload if necessary
11692	 */
11693	if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11694		sfmmu_t *sfmmup;
11695		demap_range_t dmr;
11696
11697		sfmmup = hblktosfmmu(hmeblkp);
11698		if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11699			return (0);
11700		}
11701		DEMAP_RANGE_INIT(sfmmup, &dmr);
11702		(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11703		    (caddr_t)get_hblk_base(hmeblkp),
11704		    get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11705		DEMAP_RANGE_FLUSH(&dmr);
11706		if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11707			/*
11708			 * Pageunload is working on the same hblk.
11709			 */
11710			return (0);
11711		}
11712
11713		sfmmu_hblk_steal_unload_count++;
11714	}
11715
11716	ASSERT(hmeblkp->hblk_lckcnt == 0);
11717	ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11718
11719	sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11720	hmeblkp->hblk_nextpa = hblkpa;
11721
11722	shw_hblkp = hmeblkp->hblk_shadow;
11723	if (shw_hblkp) {
11724		ASSERT(!hmeblkp->hblk_shared);
11725		shw_size = get_hblk_ttesz(shw_hblkp);
11726		vaddr = (caddr_t)get_hblk_base(hmeblkp);
11727		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11728		ASSERT(vshift < 8);
11729		/*
11730		 * Atomically clear shadow mask bit
11731		 */
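		/*
		 * cas32() returns the value previously in hblk_shw_mask;
		 * if another thread modified the mask between our read
		 * and the cas, the returned value differs from shw_mask
		 * and we retry with a fresh copy of the mask.
		 */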
11732		do {
11733			shw_mask = shw_hblkp->hblk_shw_mask;
11734			ASSERT(shw_mask & (1 << vshift));
11735			newshw_mask = shw_mask & ~(1 << vshift);
11736			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
11737			    shw_mask, newshw_mask);
11738		} while (newshw_mask != shw_mask);
11739		hmeblkp->hblk_shadow = NULL;
11740	}
11741
11742	/*
11743	 * remove shadow bit if we are stealing an unused shadow hmeblk.
11744	 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if
11745	 * we are indeed allocating a shadow hmeblk.
11746	 */
11747	hmeblkp->hblk_shw_bit = 0;
11748
11749	if (hmeblkp->hblk_shared) {
11750		sf_srd_t	*srdp;
11751		sf_region_t	*rgnp;
11752		uint_t		rid;
11753
11754		srdp = hblktosrd(hmeblkp);
11755		ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11756		rid = hmeblkp->hblk_tag.htag_rid;
11757		ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11758		ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11759		rgnp = srdp->srd_hmergnp[rid];
11760		ASSERT(rgnp != NULL);
11761		SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11762		hmeblkp->hblk_shared = 0;
11763	}
11764
11765	sfmmu_hblk_steal_count++;
11766	SFMMU_STAT(sf_steal_count);
11767
11768	return (1);
11769}
11770
11771struct hme_blk *
11772sfmmu_hmetohblk(struct sf_hment *sfhme)
11773{
11774	struct hme_blk *hmeblkp;
11775	struct sf_hment *sfhme0;
11776	struct hme_blk *hblk_dummy = 0;
11777
11778	/*
11779	 * No dummy sf_hments, please.
11780	 */
11781	ASSERT(sfhme->hme_tte.ll != 0);
11782
11783	sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
11784	hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11785	    (uintptr_t)&hblk_dummy->hblk_hme[0]);
11786
11787	return (hmeblkp);
11788}
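
/*
 * The pointer arithmetic above is the usual "containing structure"
 * idiom: hblk_dummy is a zero pointer, so &hblk_dummy->hblk_hme[0] is
 * just the offset of hblk_hme[0] within struct hme_blk.  An equivalent
 * formulation using offsetof() would be:
 *
 *	hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
 *	    offsetof(struct hme_blk, hblk_hme[0]));
 */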
11789
/*
 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
 * KM_SLEEP allocation.  (This function returns nothing; if all else
 * fails, a minimum-size TSB is forced via TSB_FORCEALLOC.)
 */
11797static void
11798sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11799{
11800	struct tsb_info *tsbinfop, *next;
11801	tsb_replace_rc_t rc;
11802	boolean_t gotfirst = B_FALSE;
11803
11804	ASSERT(sfmmup != ksfmmup);
11805	ASSERT(sfmmu_hat_lock_held(sfmmup));
11806
11807	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11808		cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11809	}
11810
11811	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11812		SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11813	} else {
11814		return;
11815	}
11816
11817	ASSERT(sfmmup->sfmmu_tsb != NULL);
11818
11819	/*
11820	 * Loop over all tsbinfo's replacing them with ones that actually have
11821	 * a TSB.  If any of the replacements ever fail, bail out of the loop.
11822	 */
11823	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11824		ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11825		next = tsbinfop->tsb_next;
11826		rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11827		    hatlockp, TSB_SWAPIN);
11828		if (rc != TSB_SUCCESS) {
11829			break;
11830		}
11831		gotfirst = B_TRUE;
11832	}
11833
11834	switch (rc) {
11835	case TSB_SUCCESS:
11836		SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11837		cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11838		return;
11839	case TSB_LOSTRACE:
11840		break;
11841	case TSB_ALLOCFAIL:
11842		break;
11843	default:
11844		panic("sfmmu_replace_tsb returned unrecognized failure code "
11845		    "%d", rc);
11846	}
11847
11848	/*
11849	 * In this case, we failed to get one of our TSBs.  If we failed to
11850	 * get the first TSB, get one of minimum size (8KB).  Walk the list
11851	 * and throw away the tsbinfos, starting where the allocation failed;
11852	 * we can get by with just one TSB as long as we don't leave the
11853	 * SWAPPED tsbinfo structures lying around.
11854	 */
11855	tsbinfop = sfmmup->sfmmu_tsb;
11856	next = tsbinfop->tsb_next;
11857	tsbinfop->tsb_next = NULL;
11858
11859	sfmmu_hat_exit(hatlockp);
11860	for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11861		next = tsbinfop->tsb_next;
11862		sfmmu_tsbinfo_free(tsbinfop);
11863	}
11864	hatlockp = sfmmu_hat_enter(sfmmup);
11865
11866	/*
11867	 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11868	 * pages.
11869	 */
11870	if (!gotfirst) {
11871		tsbinfop = sfmmup->sfmmu_tsb;
11872		rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11873		    hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11874		ASSERT(rc == TSB_SUCCESS);
11875	}
11876
11877	SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11878	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11879}
11880
11881static int
11882sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11883{
11884	ulong_t bix = 0;
11885	uint_t rid;
11886	sf_region_t *rgnp;
11887
11888	ASSERT(srdp != NULL);
11889	ASSERT(srdp->srd_refcnt != 0);
11890
11891	w <<= BT_ULSHIFT;
11892	while (bmw) {
11893		if (!(bmw & 0x1)) {
11894			bix++;
11895			bmw >>= 1;
11896			continue;
11897		}
11898		rid = w | bix;
11899		rgnp = srdp->srd_hmergnp[rid];
11900		ASSERT(rgnp->rgn_refcnt > 0);
11901		ASSERT(rgnp->rgn_id == rid);
11902		if (addr < rgnp->rgn_saddr ||
11903		    addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11904			bix++;
11905			bmw >>= 1;
11906		} else {
11907			return (1);
11908		}
11909	}
11910	return (0);
11911}
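
/*
 * For illustration of the encoding walked above: the hme region map is
 * an array of BT_NBIPUL-bit words, so for bitmap word w with bit bix
 * set, the corresponding region id is
 *
 *	rid = (w << BT_ULSHIFT) | bix;
 *
 * which is why the word index is shifted once on entry and the bit
 * position is OR-ed in for each set bit.
 */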
11912
11913/*
11914 * Handle exceptions for low level tsb_handler.
11915 *
11916 * There are many scenarios that could land us here:
11917 *
 * If the context is invalid we land here.  The context can be invalid
 * for 3 reasons: 1) we couldn't allocate a new context and now need to
 * perform a wrap-around operation in order to allocate a new context;
 * 2) the context was invalidated to change pagesize programming;
 * 3) the ISM or TSB configuration is changing for this process and we
 * are forced in here to do a synchronization operation.  If the context
 * is valid we can be here from the window trap handler.  In that case
 * just call trap() to handle the fault.
11926 *
11927 * Note that the process will run in INVALID_CONTEXT before
11928 * faulting into here and subsequently loading the MMU registers
11929 * (including the TSB base register) associated with this process.
11930 * For this reason, the trap handlers must all test for
11931 * INVALID_CONTEXT before attempting to access any registers other
11932 * than the context registers.
11933 */
11934void
11935sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11936{
11937	sfmmu_t *sfmmup, *shsfmmup;
11938	uint_t ctxtype;
11939	klwp_id_t lwp;
11940	char lwp_save_state;
11941	hatlock_t *hatlockp, *shatlockp;
11942	struct tsb_info *tsbinfop;
11943	struct tsbmiss *tsbmp;
11944	sf_scd_t *scdp;
11945
11946	SFMMU_STAT(sf_tsb_exceptions);
11947	SFMMU_MMU_STAT(mmu_tsb_exceptions);
11948	sfmmup = astosfmmu(curthread->t_procp->p_as);
11949	/*
	 * note that in sun4u, the tagaccess register contains the ctxnum
11951	 * while sun4v passes ctxtype in the tagaccess register.
11952	 */
11953	ctxtype = tagaccess & TAGACC_CTX_MASK;
11954
11955	ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11956	ASSERT(sfmmup->sfmmu_ismhat == 0);
11957	ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11958	    ctxtype == INVALID_CONTEXT);
11959
11960	if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11961		/*
		 * We may land here because the shme bitmap and pagesize
		 * flags are updated lazily in the tsbmiss area on other
		 * cpus.  If we detect here that the tsbmiss area is out
		 * of sync with the sfmmu, update it and retry the
		 * trapped instruction.  Otherwise call trap().
11967		 */
11968		int ret = 0;
11969		uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11970		caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11971
11972		/*
11973		 * Must set lwp state to LWP_SYS before
11974		 * trying to acquire any adaptive lock
11975		 */
11976		lwp = ttolwp(curthread);
11977		ASSERT(lwp);
11978		lwp_save_state = lwp->lwp_state;
11979		lwp->lwp_state = LWP_SYS;
11980
11981		hatlockp = sfmmu_hat_enter(sfmmup);
11982		kpreempt_disable();
11983		tsbmp = &tsbmiss_area[CPU->cpu_id];
11984		ASSERT(sfmmup == tsbmp->usfmmup);
11985		if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11986		    ~tteflag_mask) ||
11987		    ((tsbmp->uhat_rtteflags ^  sfmmup->sfmmu_rtteflags) &
11988		    ~tteflag_mask)) {
11989			tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11990			tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11991			ret = 1;
11992		}
11993		if (sfmmup->sfmmu_srdp != NULL) {
11994			ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11995			ulong_t *tm = tsbmp->shmermap;
11996			ulong_t i;
11997			for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11998				ulong_t d = tm[i] ^ sm[i];
11999				if (d) {
12000					if (d & sm[i]) {
12001						if (!ret && sfmmu_is_rgnva(
12002						    sfmmup->sfmmu_srdp,
12003						    addr, i, d & sm[i])) {
12004							ret = 1;
12005						}
12006					}
12007					tm[i] = sm[i];
12008				}
12009			}
12010		}
12011		kpreempt_enable();
12012		sfmmu_hat_exit(hatlockp);
12013		lwp->lwp_state = lwp_save_state;
12014		if (ret) {
12015			return;
12016		}
12017	} else if (ctxtype == INVALID_CONTEXT) {
12018		/*
12019		 * First, make sure we come out of here with a valid ctx,
12020		 * since if we don't get one we'll simply loop on the
12021		 * faulting instruction.
12022		 *
		 * If the ISM mappings are changing, the TSB is being
		 * relocated, the process is being swapped, or the process
		 * is joining or leaving an SCD or shared regions, we
		 * serialize behind the controlling thread with the hat
		 * lock, sfmmu_flags and the sfmmu_tsb_cv condition
		 * variable.
12028		 */
12029
12030		/*
12031		 * Must set lwp state to LWP_SYS before
12032		 * trying to acquire any adaptive lock
12033		 */
12034		lwp = ttolwp(curthread);
12035		ASSERT(lwp);
12036		lwp_save_state = lwp->lwp_state;
12037		lwp->lwp_state = LWP_SYS;
12038
12039		hatlockp = sfmmu_hat_enter(sfmmup);
12040retry:
12041		if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
12042			shsfmmup = scdp->scd_sfmmup;
12043			ASSERT(shsfmmup != NULL);
12044
12045			for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
12046			    tsbinfop = tsbinfop->tsb_next) {
12047				if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
12048					/* drop the private hat lock */
12049					sfmmu_hat_exit(hatlockp);
12050					/* acquire the shared hat lock */
12051					shatlockp = sfmmu_hat_enter(shsfmmup);
12052					/*
12053					 * recheck to see if anything changed
12054					 * after we drop the private hat lock.
12055					 */
12056					if (sfmmup->sfmmu_scdp == scdp &&
12057					    shsfmmup == scdp->scd_sfmmup) {
12058						sfmmu_tsb_chk_reloc(shsfmmup,
12059						    shatlockp);
12060					}
12061					sfmmu_hat_exit(shatlockp);
12062					hatlockp = sfmmu_hat_enter(sfmmup);
12063					goto retry;
12064				}
12065			}
12066		}
12067
12068		for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
12069		    tsbinfop = tsbinfop->tsb_next) {
12070			if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
12071				cv_wait(&sfmmup->sfmmu_tsb_cv,
12072				    HATLOCK_MUTEXP(hatlockp));
12073				goto retry;
12074			}
12075		}
12076
12077		/*
12078		 * Wait for ISM maps to be updated.
12079		 */
12080		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12081			cv_wait(&sfmmup->sfmmu_tsb_cv,
12082			    HATLOCK_MUTEXP(hatlockp));
12083			goto retry;
12084		}
12085
12086		/* Is this process joining an SCD? */
12087		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
12088			/*
12089			 * Flush private TSB and setup shared TSB.
12090			 * sfmmu_finish_join_scd() does not drop the
12091			 * hat lock.
12092			 */
12093			sfmmu_finish_join_scd(sfmmup);
12094			SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
12095		}
12096
12097		/*
12098		 * If we're swapping in, get TSB(s).  Note that we must do
12099		 * this before we get a ctx or load the MMU state.  Once
12100		 * we swap in we have to recheck to make sure the TSB(s) and
12101		 * ISM mappings didn't change while we slept.
12102		 */
12103		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
12104			sfmmu_tsb_swapin(sfmmup, hatlockp);
12105			goto retry;
12106		}
12107
12108		sfmmu_get_ctx(sfmmup);
12109
12110		sfmmu_hat_exit(hatlockp);
12111		/*
		 * Must restore lwp_state since we are not
		 * calling trap() for further processing;
		 * restore it before returning.
12115		 */
12116		lwp->lwp_state = lwp_save_state;
12117		return;
12118	}
12119	trap(rp, (caddr_t)tagaccess, traptype, 0);
12120}
12121
12122static void
12123sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
12124{
12125	struct tsb_info *tp;
12126
12127	ASSERT(sfmmu_hat_lock_held(sfmmup));
12128
12129	for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
12130		if (tp->tsb_flags & TSB_RELOC_FLAG) {
12131			cv_wait(&sfmmup->sfmmu_tsb_cv,
12132			    HATLOCK_MUTEXP(hatlockp));
12133			break;
12134		}
12135	}
12136}
12137
12138/*
 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
 * TTE_SUSPENDED bit is set in the tte.  We block on acquiring a page
 * lock rather than spinning, to avoid send-mondo timeouts with
 * interrupts enabled.  When the lock is acquired it is immediately
 * released and we return back to sfmmu_vatopfn just after
 * the GET_TTE call.
12145 */
12146void
12147sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
12148{
12149	struct page	**pp;
12150
12151	(void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12152	as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12153}
12154
12155/*
 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the
 * TTE_SUSPENDED bit is set in the tte.  We do this so that we can handle
12158 * cross traps which cannot be handled while spinning in the
12159 * trap handlers. Simply enter and exit the kpr_suspendlock spin
12160 * mutex, which is held by the holder of the suspend bit, and then
12161 * retry the trapped instruction after unwinding.
12162 */
12163/*ARGSUSED*/
12164void
12165sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
12166{
12167	ASSERT(curthread != kreloc_thread);
12168	mutex_enter(&kpr_suspendlock);
12169	mutex_exit(&kpr_suspendlock);
12170}
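
/*
 * For illustration, the barrier idiom above seen from both sides: the
 * thread that set the suspend bit holds kpr_suspendlock for the length
 * of the suspension, so an enter-then-exit here simply blocks until
 * that thread is done:
 *
 *	suspender			tsbmiss handler (above)
 *	---------			-----------------------
 *	mutex_enter(&kpr_suspendlock);
 *	set TTE_SUSPENDED
 *					mutex_enter(&kpr_suspendlock);
 *	... relocate the page ...	    (blocks)
 *	clear TTE_SUSPENDED
 *	mutex_exit(&kpr_suspendlock);	    (resumes)
 *					mutex_exit(&kpr_suspendlock);
 */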
12171
12172/*
 * This routine could be optimized to reduce the number of xcalls by flushing
 * the entire TLB if the region reference count is above some threshold, but
 * the tradeoff will depend on the size of the TLB.  So for now flush the
 * specific page one context at a time.
12177 *
12178 * If uselocks is 0 then it's called after all cpus were captured and all the
12179 * hat locks were taken. In this case don't take the region lock by relying on
12180 * the order of list region update operations in hat_join_region(),
12181 * hat_leave_region() and hat_dup_region(). The ordering in those routines
12182 * guarantees that list is always forward walkable and reaches active sfmmus
12183 * regardless of where xc_attention() captures a cpu.
12184 */
12185cpuset_t
12186sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
12187    struct hme_blk *hmeblkp, int uselocks)
12188{
12189	sfmmu_t	*sfmmup;
12190	cpuset_t cpuset;
12191	cpuset_t rcpuset;
12192	hatlock_t *hatlockp;
12193	uint_t rid = rgnp->rgn_id;
12194	sf_rgn_link_t *rlink;
12195	sf_scd_t *scdp;
12196
12197	ASSERT(hmeblkp->hblk_shared);
12198	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
12199	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
12200
12201	CPUSET_ZERO(rcpuset);
12202	if (uselocks) {
12203		mutex_enter(&rgnp->rgn_mutex);
12204	}
12205	sfmmup = rgnp->rgn_sfmmu_head;
12206	while (sfmmup != NULL) {
12207		if (uselocks) {
12208			hatlockp = sfmmu_hat_enter(sfmmup);
12209		}
12210
12211		/*
12212		 * When an SCD is created the SCD hat is linked on the sfmmu
12213		 * region lists for each hme region which is part of the
		 * SCD.  If we find an SCD hat while walking these lists,
		 * we flush the shared TSBs; if we find a private hat
		 * which is part of an SCD, but where the region
		 * is not part of the SCD, then we flush the private TSBs.
12218		 */
12219		if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12220		    !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
12221			scdp = sfmmup->sfmmu_scdp;
12222			if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
12223				if (uselocks) {
12224					sfmmu_hat_exit(hatlockp);
12225				}
12226				goto next;
12227			}
12228		}
12229
12230		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12231
12232		kpreempt_disable();
12233		cpuset = sfmmup->sfmmu_cpusran;
12234		CPUSET_AND(cpuset, cpu_ready_set);
12235		CPUSET_DEL(cpuset, CPU->cpu_id);
12236		SFMMU_XCALL_STATS(sfmmup);
12237		xt_some(cpuset, vtag_flushpage_tl1,
12238		    (uint64_t)addr, (uint64_t)sfmmup);
12239		vtag_flushpage(addr, (uint64_t)sfmmup);
12240		if (uselocks) {
12241			sfmmu_hat_exit(hatlockp);
12242		}
12243		kpreempt_enable();
12244		CPUSET_OR(rcpuset, cpuset);
12245
12246next:
12247		/* LINTED: constant in conditional context */
12248		SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12249		ASSERT(rlink != NULL);
12250		sfmmup = rlink->next;
12251	}
12252	if (uselocks) {
12253		mutex_exit(&rgnp->rgn_mutex);
12254	}
12255	return (rcpuset);
12256}
12257
12258/*
 * This routine takes an sfmmu pointer and the va for an address in an
12260 * ISM region as input and returns the corresponding region id in ism_rid.
12261 * The return value of 1 indicates that a region has been found and ism_rid
12262 * is valid, otherwise 0 is returned.
12263 */
12264static int
12265find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12266{
12267	ism_blk_t	*ism_blkp;
12268	int		i;
12269	ism_map_t	*ism_map;
12270#ifdef DEBUG
12271	struct hat	*ism_hatid;
12272#endif
12273	ASSERT(sfmmu_hat_lock_held(sfmmup));
12274
12275	ism_blkp = sfmmup->sfmmu_iblk;
12276	while (ism_blkp != NULL) {
12277		ism_map = ism_blkp->iblk_maps;
12278		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12279			if ((va >= ism_start(ism_map[i])) &&
12280			    (va < ism_end(ism_map[i]))) {
12281
12282				*ism_rid = ism_map[i].imap_rid;
12283#ifdef DEBUG
12284				ism_hatid = ism_map[i].imap_ismhat;
12285				ASSERT(ism_hatid == ism_sfmmup);
12286				ASSERT(ism_hatid->sfmmu_ismhat);
12287#endif
12288				return (1);
12289			}
12290		}
12291		ism_blkp = ism_blkp->iblk_next;
12292	}
12293	return (0);
12294}
12295
12296/*
 * Special routine to flush out ism mappings: TSBs, TLBs and D-caches.
12298 * This routine may be called with all cpu's captured. Therefore, the
12299 * caller is responsible for holding all locks and disabling kernel
12300 * preemption.
12301 */
12302/* ARGSUSED */
12303static void
12304sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12305	struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12306{
12307	cpuset_t 	cpuset;
12308	caddr_t 	va;
12309	ism_ment_t	*ment;
12310	sfmmu_t		*sfmmup;
12311#ifdef VAC
12312	int 		vcolor;
12313#endif
12314
12315	sf_scd_t	*scdp;
12316	uint_t		ism_rid;
12317
12318	ASSERT(!hmeblkp->hblk_shared);
12319	/*
12320	 * Walk the ism_hat's mapping list and flush the page
12321	 * from every hat sharing this ism_hat. This routine
12322	 * may be called while all cpu's have been captured.
12323	 * Therefore we can't attempt to grab any locks. For now
12324	 * this means we will protect the ism mapping list under
12325	 * a single lock which will be grabbed by the caller.
	 * If hat_share/unshare scalability becomes a performance
12327	 * problem then we may need to re-think ism mapping list locking.
12328	 */
12329	ASSERT(ism_sfmmup->sfmmu_ismhat);
12330	ASSERT(MUTEX_HELD(&ism_mlist_lock));
12331	addr = addr - ISMID_STARTADDR;
12332
12333	for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12334
12335		sfmmup = ment->iment_hat;
12336
12337		va = ment->iment_base_va;
		va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
12339
12340		/*
12341		 * When an SCD is created the SCD hat is linked on the ism
12342		 * mapping lists for each ISM segment which is part of the
		 * SCD.  If we find an SCD hat while walking these lists,
		 * we flush the shared TSBs; if we find a private hat
		 * which is part of an SCD, but where the region
		 * corresponding to this va is not part of the SCD, then
		 * we flush the private TSBs.
12348		 */
12349		if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12350		    !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
12351		    !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12352			if (!find_ism_rid(sfmmup, ism_sfmmup, va,
12353			    &ism_rid)) {
12354				cmn_err(CE_PANIC,
12355				    "can't find matching ISM rid!");
12356			}
12357
12358			scdp = sfmmup->sfmmu_scdp;
12359			if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
12360			    SF_RGNMAP_TEST(scdp->scd_ismregion_map,
12361			    ism_rid)) {
12362				continue;
12363			}
12364		}
12365		SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12366
12367		cpuset = sfmmup->sfmmu_cpusran;
12368		CPUSET_AND(cpuset, cpu_ready_set);
12369		CPUSET_DEL(cpuset, CPU->cpu_id);
12370		SFMMU_XCALL_STATS(sfmmup);
12371		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
12372		    (uint64_t)sfmmup);
12373		vtag_flushpage(va, (uint64_t)sfmmup);
12374
12375#ifdef VAC
12376		/*
12377		 * Flush D$
12378		 * When flushing D$ we must flush all
12379		 * cpu's. See sfmmu_cache_flush().
12380		 */
12381		if (cache_flush_flag == CACHE_FLUSH) {
12382			cpuset = cpu_ready_set;
12383			CPUSET_DEL(cpuset, CPU->cpu_id);
12384
12385			SFMMU_XCALL_STATS(sfmmup);
12386			vcolor = addr_to_vcolor(va);
12387			xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12388			vac_flushpage(pfnum, vcolor);
12389		}
12390#endif	/* VAC */
12391	}
12392}
12393
12394/*
 * Demaps the TSB and CPU caches, and flushes all TLBs on all CPUs, for
 * a particular virtual address and ctx.  If noflush is set we do not
12397 * flush the TLB/TSB.  This function may or may not be called with the
12398 * HAT lock held.
12399 */
12400static void
12401sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12402	pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12403	int hat_lock_held)
12404{
12405#ifdef VAC
12406	int vcolor;
12407#endif
12408	cpuset_t cpuset;
12409	hatlock_t *hatlockp;
12410
12411	ASSERT(!hmeblkp->hblk_shared);
12412
12413#if defined(lint) && !defined(VAC)
12414	pfnum = pfnum;
12415	cpu_flag = cpu_flag;
12416	cache_flush_flag = cache_flush_flag;
12417#endif
12418
12419	/*
12420	 * There is no longer a need to protect against ctx being
12421	 * stolen here since we don't store the ctx in the TSB anymore.
12422	 */
12423#ifdef VAC
12424	vcolor = addr_to_vcolor(addr);
12425#endif
12426
12427	/*
12428	 * We must hold the hat lock during the flush of TLB,
12429	 * to avoid a race with sfmmu_invalidate_ctx(), where
12430	 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12431	 * causing TLB demap routine to skip flush on that MMU.
12432	 * If the context on a MMU has already been set to
12433	 * INVALID_CONTEXT, we just get an extra flush on
12434	 * that MMU.
12435	 */
12436	if (!hat_lock_held && !tlb_noflush)
12437		hatlockp = sfmmu_hat_enter(sfmmup);
12438
12439	kpreempt_disable();
12440	if (!tlb_noflush) {
12441		/*
12442		 * Flush the TSB and TLB.
12443		 */
12444		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12445
12446		cpuset = sfmmup->sfmmu_cpusran;
12447		CPUSET_AND(cpuset, cpu_ready_set);
12448		CPUSET_DEL(cpuset, CPU->cpu_id);
12449
12450		SFMMU_XCALL_STATS(sfmmup);
12451
12452		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12453		    (uint64_t)sfmmup);
12454
12455		vtag_flushpage(addr, (uint64_t)sfmmup);
12456	}
12457
12458	if (!hat_lock_held && !tlb_noflush)
12459		sfmmu_hat_exit(hatlockp);
12460
12461#ifdef VAC
12462	/*
12463	 * Flush the D$
12464	 *
12465	 * Even if the ctx is stolen, we need to flush the
12466	 * cache. Our ctx stealer only flushes the TLBs.
12467	 */
12468	if (cache_flush_flag == CACHE_FLUSH) {
12469		if (cpu_flag & FLUSH_ALL_CPUS) {
12470			cpuset = cpu_ready_set;
12471		} else {
12472			cpuset = sfmmup->sfmmu_cpusran;
12473			CPUSET_AND(cpuset, cpu_ready_set);
12474		}
12475		CPUSET_DEL(cpuset, CPU->cpu_id);
12476		SFMMU_XCALL_STATS(sfmmup);
12477		xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12478		vac_flushpage(pfnum, vcolor);
12479	}
12480#endif	/* VAC */
12481	kpreempt_enable();
12482}
12483
12484/*
12485 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12486 * address and ctx.  If noflush is set we do not currently do anything.
12487 * This function may or may not be called with the HAT lock held.
12488 */
12489static void
12490sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12491	int tlb_noflush, int hat_lock_held)
12492{
12493	cpuset_t cpuset;
12494	hatlock_t *hatlockp;
12495
12496	ASSERT(!hmeblkp->hblk_shared);
12497
12498	/*
12499	 * If the process is exiting we have nothing to do.
12500	 */
12501	if (tlb_noflush)
12502		return;
12503
12504	/*
12505	 * Flush TSB.
12506	 */
12507	if (!hat_lock_held)
12508		hatlockp = sfmmu_hat_enter(sfmmup);
12509	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12510
12511	kpreempt_disable();
12512
12513	cpuset = sfmmup->sfmmu_cpusran;
12514	CPUSET_AND(cpuset, cpu_ready_set);
12515	CPUSET_DEL(cpuset, CPU->cpu_id);
12516
12517	SFMMU_XCALL_STATS(sfmmup);
12518	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12519
12520	vtag_flushpage(addr, (uint64_t)sfmmup);
12521
12522	if (!hat_lock_held)
12523		sfmmu_hat_exit(hatlockp);
12524
12525	kpreempt_enable();
12526
12527}
12528
12529/*
12530 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12531 * call handler that can flush a range of pages to save on xcalls.
12532 */
12533static int sfmmu_xcall_save;
12534
12535/*
 * This routine is never used for demapping addresses backed by SRD hmeblks.
12537 */
12538static void
12539sfmmu_tlb_range_demap(demap_range_t *dmrp)
12540{
12541	sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12542	hatlock_t *hatlockp;
12543	cpuset_t cpuset;
12544	uint64_t sfmmu_pgcnt;
12545	pgcnt_t pgcnt = 0;
12546	int pgunload = 0;
12547	int dirtypg = 0;
12548	caddr_t addr = dmrp->dmr_addr;
12549	caddr_t eaddr;
12550	uint64_t bitvec = dmrp->dmr_bitvec;
12551
12552	ASSERT(bitvec & 1);
12553
12554	/*
12555	 * Flush TSB and calculate number of pages to flush.
12556	 */
12557	while (bitvec != 0) {
12558		dirtypg = 0;
12559		/*
12560		 * Find the first page to flush and then count how many
12561		 * pages there are after it that also need to be flushed.
12562		 * This way the number of TSB flushes is minimized.
12563		 */
12564		while ((bitvec & 1) == 0) {
12565			pgcnt++;
12566			addr += MMU_PAGESIZE;
12567			bitvec >>= 1;
12568		}
12569		while (bitvec & 1) {
12570			dirtypg++;
12571			bitvec >>= 1;
12572		}
12573		eaddr = addr + ptob(dirtypg);
12574		hatlockp = sfmmu_hat_enter(sfmmup);
12575		sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12576		sfmmu_hat_exit(hatlockp);
12577		pgunload += dirtypg;
12578		addr = eaddr;
12579		pgcnt += dirtypg;
12580	}
12581
12582	ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12583	if (sfmmup->sfmmu_free == 0) {
12584		addr = dmrp->dmr_addr;
12585		bitvec = dmrp->dmr_bitvec;
12586
12587		/*
12588		 * make sure it has SFMMU_PGCNT_SHIFT bits only,
12589		 * as it will be used to pack argument for xt_some
12590		 */
12591		ASSERT((pgcnt > 0) &&
12592		    (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12593
12594		/*
		 * Encode pgcnt as (pgcnt - 1), passed in the low 6 bits
		 * of sfmmup.  This is doable since pgcnt is
		 * always >= 1.
12598		 */
12599		ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12600		sfmmu_pgcnt = (uint64_t)sfmmup |
12601		    ((pgcnt - 1) & SFMMU_PGCNT_MASK);
12602
12603		/*
12604		 * We must hold the hat lock during the flush of TLB,
12605		 * to avoid a race with sfmmu_invalidate_ctx(), where
12606		 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12607		 * causing TLB demap routine to skip flush on that MMU.
12608		 * If the context on a MMU has already been set to
12609		 * INVALID_CONTEXT, we just get an extra flush on
12610		 * that MMU.
12611		 */
12612		hatlockp = sfmmu_hat_enter(sfmmup);
12613		kpreempt_disable();
12614
12615		cpuset = sfmmup->sfmmu_cpusran;
12616		CPUSET_AND(cpuset, cpu_ready_set);
12617		CPUSET_DEL(cpuset, CPU->cpu_id);
12618
12619		SFMMU_XCALL_STATS(sfmmup);
12620		xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12621		    sfmmu_pgcnt);
12622
12623		for (; bitvec != 0; bitvec >>= 1) {
12624			if (bitvec & 1)
12625				vtag_flushpage(addr, (uint64_t)sfmmup);
12626			addr += MMU_PAGESIZE;
12627		}
12628		kpreempt_enable();
12629		sfmmu_hat_exit(hatlockp);
12630
12631		sfmmu_xcall_save += (pgunload-1);
12632	}
12633	dmrp->dmr_bitvec = 0;
12634}
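
/*
 * For illustration of the packing used above: the sfmmup pointer has
 * its low SFMMU_PGCNT_SHIFT bits clear (asserted before the encode), so
 * (pgcnt - 1) can ride in those bits and the TL1 handler can recover
 * both values:
 *
 *	encode:	sfmmu_pgcnt = (uint64_t)sfmmup |
 *		    ((pgcnt - 1) & SFMMU_PGCNT_MASK);
 *	decode:	sfmmup = (sfmmu_t *)(sfmmu_pgcnt & ~SFMMU_PGCNT_MASK);
 *		pgcnt  = (sfmmu_pgcnt & SFMMU_PGCNT_MASK) + 1;
 */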
12635
12636/*
12637 * In cases where we need to synchronize with TLB/TSB miss trap
12638 * handlers, _and_ need to flush the TLB, it's a lot easier to
12639 * throw away the context from the process than to do a
12640 * special song and dance to keep things consistent for the
12641 * handlers.
12642 *
12643 * Since the process suddenly ends up without a context and our caller
12644 * holds the hat lock, threads that fault after this function is called
12645 * will pile up on the lock.  We can then do whatever we need to
12646 * atomically from the context of the caller.  The first blocked thread
12647 * to resume executing will get the process a new context, and the
12648 * process will resume executing.
12649 *
12650 * One added advantage of this approach is that on MMUs that
12651 * support a "flush all" operation, we will delay the flush until
12652 * cnum wrap-around, and then flush the TLB one time.  This
12653 * is rather rare, so it's a lot less expensive than making 8000
12654 * x-calls to flush the TLB 8000 times.
12655 *
12656 * A per-process (PP) lock is used to synchronize ctx allocations in
12657 * resume() and ctx invalidations here.
12658 */
12659static void
12660sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12661{
12662	cpuset_t cpuset;
12663	int cnum, currcnum;
12664	mmu_ctx_t *mmu_ctxp;
12665	int i;
12666	uint_t pstate_save;
12667
12668	SFMMU_STAT(sf_ctx_inv);
12669
12670	ASSERT(sfmmu_hat_lock_held(sfmmup));
12671	ASSERT(sfmmup != ksfmmup);
12672
12673	kpreempt_disable();
12674
12675	mmu_ctxp = CPU_MMU_CTXP(CPU);
12676	ASSERT(mmu_ctxp);
12677	ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12678	ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12679
12680	currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12681
12682	pstate_save = sfmmu_disable_intrs();
12683
12684	lock_set(&sfmmup->sfmmu_ctx_lock);	/* acquire PP lock */
12685	/* set HAT cnum invalid across all context domains. */
12686	for (i = 0; i < max_mmu_ctxdoms; i++) {
12687
		cnum = sfmmup->sfmmu_ctxs[i].cnum;
12689		if (cnum == INVALID_CONTEXT) {
12690			continue;
12691		}
12692
12693		sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12694	}
12695	membar_enter();	/* make sure globally visible to all CPUs */
12696	lock_clear(&sfmmup->sfmmu_ctx_lock);	/* release PP lock */
12697
12698	sfmmu_enable_intrs(pstate_save);
12699
12700	cpuset = sfmmup->sfmmu_cpusran;
12701	CPUSET_DEL(cpuset, CPU->cpu_id);
12702	CPUSET_AND(cpuset, cpu_ready_set);
12703	if (!CPUSET_ISNULL(cpuset)) {
12704		SFMMU_XCALL_STATS(sfmmup);
12705		xt_some(cpuset, sfmmu_raise_tsb_exception,
12706		    (uint64_t)sfmmup, INVALID_CONTEXT);
12707		xt_sync(cpuset);
12708		SFMMU_STAT(sf_tsb_raise_exception);
12709		SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12710	}
12711
12712	/*
12713	 * If the hat to-be-invalidated is the same as the current
12714	 * process on local CPU we need to invalidate
12715	 * this CPU context as well.
12716	 */
12717	if ((sfmmu_getctx_sec() == currcnum) &&
12718	    (currcnum != INVALID_CONTEXT)) {
12719		/* sets shared context to INVALID too */
12720		sfmmu_setctx_sec(INVALID_CONTEXT);
12721		sfmmu_clear_utsbinfo();
12722	}
12723
12724	SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12725
12726	kpreempt_enable();
12727
12728	/*
12729	 * we hold the hat lock, so nobody should allocate a context
12730	 * for us yet
12731	 */
12732	ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12733}
12734
12735#ifdef VAC
12736/*
 * We need to flush the cache on all cpus.  It is possible that
 * a process referenced a page as cacheable but has since exited
 * and cleared the mapping list.  We still need to flush it, but
 * have no state, so flushing on all cpus is the only alternative.
12741 */
12742void
12743sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12744{
12745	cpuset_t cpuset;
12746
12747	kpreempt_disable();
12748	cpuset = cpu_ready_set;
12749	CPUSET_DEL(cpuset, CPU->cpu_id);
12750	SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
12751	xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12752	xt_sync(cpuset);
12753	vac_flushpage(pfnum, vcolor);
12754	kpreempt_enable();
12755}
12756
12757void
12758sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12759{
12760	cpuset_t cpuset;
12761
12762	ASSERT(vcolor >= 0);
12763
12764	kpreempt_disable();
12765	cpuset = cpu_ready_set;
12766	CPUSET_DEL(cpuset, CPU->cpu_id);
12767	SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
12768	xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12769	xt_sync(cpuset);
12770	vac_flushcolor(vcolor, pfnum);
12771	kpreempt_enable();
12772}
12773#endif	/* VAC */
12774
12775/*
12776 * We need to prevent processes from accessing the TSB using a cached physical
12777 * address.  It's alright if they try to access the TSB via virtual address
12778 * since they will just fault on that virtual address once the mapping has
12779 * been suspended.
12780 */
12781#pragma weak sendmondo_in_recover
12782
12783/* ARGSUSED */
12784static int
12785sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12786{
12787	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12788	sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12789	hatlock_t *hatlockp;
12790	sf_scd_t *scdp;
12791
12792	if (flags != HAT_PRESUSPEND)
12793		return (0);
12794
12795	/*
	 * If the tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
	 * be a shared hat; set the SCD's tsbinfo flag.
	 * If the tsb is not shared, sfmmup is a private hat; set
	 * its private tsbinfo flag.
12800	 */
12801	hatlockp = sfmmu_hat_enter(sfmmup);
12802	tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12803
12804	if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12805		sfmmu_tsb_inv_ctx(sfmmup);
12806		sfmmu_hat_exit(hatlockp);
12807	} else {
12808		/* release lock on the shared hat */
12809		sfmmu_hat_exit(hatlockp);
12810		/* sfmmup is a shared hat */
12811		ASSERT(sfmmup->sfmmu_scdhat);
12812		scdp = sfmmup->sfmmu_scdp;
12813		ASSERT(scdp != NULL);
12814		/* get private hat from the scd list */
12815		mutex_enter(&scdp->scd_mutex);
12816		sfmmup = scdp->scd_sf_list;
12817		while (sfmmup != NULL) {
12818			hatlockp = sfmmu_hat_enter(sfmmup);
12819			/*
12820			 * We do not call sfmmu_tsb_inv_ctx here because
12821			 * sendmondo_in_recover check is only needed for
12822			 * sun4u.
12823			 */
12824			sfmmu_invalidate_ctx(sfmmup);
12825			sfmmu_hat_exit(hatlockp);
12826			sfmmup = sfmmup->sfmmu_scd_link.next;
12827
12828		}
12829		mutex_exit(&scdp->scd_mutex);
12830	}
12831	return (0);
12832}
12833
12834static void
12835sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12836{
12837	extern uint32_t sendmondo_in_recover;
12838
12839	ASSERT(sfmmu_hat_lock_held(sfmmup));
12840
12841	/*
12842	 * For Cheetah+ Erratum 25:
12843	 * Wait for any active recovery to finish.  We can't risk
12844	 * relocating the TSB of the thread running mondo_recover_proc()
12845	 * since, if we did that, we would deadlock.  The scenario we are
12846	 * trying to avoid is as follows:
12847	 *
12848	 * THIS CPU			RECOVER CPU
12849	 * --------			-----------
12850	 *				Begins recovery, walking through TSB
12851	 * hat_pagesuspend() TSB TTE
12852	 *				TLB miss on TSB TTE, spins at TL1
12853	 * xt_sync()
12854	 *	send_mondo_timeout()
12855	 *	mondo_recover_proc()
12856	 *	((deadlocked))
12857	 *
12858	 * The second half of the workaround is that mondo_recover_proc()
12859	 * checks to see if the tsb_info has the RELOC flag set, and if it
12860	 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12861	 * and hence avoiding the TLB miss that could result in a deadlock.
12862	 */
12863	if (&sendmondo_in_recover) {
12864		membar_enter();	/* make sure RELOC flag visible */
12865		while (sendmondo_in_recover) {
12866			drv_usecwait(1);
12867			membar_consumer();
12868		}
12869	}
12870
12871	sfmmu_invalidate_ctx(sfmmup);
12872}
12873
12874/* ARGSUSED */
12875static int
12876sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12877	void *tsbinfo, pfn_t newpfn)
12878{
12879	hatlock_t *hatlockp;
12880	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12881	sfmmu_t	*sfmmup = tsbinfop->tsb_sfmmu;
12882
12883	if (flags != HAT_POSTUNSUSPEND)
12884		return (0);
12885
12886	hatlockp = sfmmu_hat_enter(sfmmup);
12887
12888	SFMMU_STAT(sf_tsb_reloc);
12889
12890	/*
12891	 * The process may have swapped out while we were relocating one
12892	 * of its TSBs.  If so, don't bother doing the setup since the
12893	 * process can't be using the memory anymore.
12894	 */
12895	if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12896		ASSERT(va == tsbinfop->tsb_va);
12897		sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12898
12899		if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12900			sfmmu_inv_tsb(tsbinfop->tsb_va,
12901			    TSB_BYTES(tsbinfop->tsb_szc));
12902			tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12903		}
12904	}
12905
12906	membar_exit();
12907	tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12908	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12909
12910	sfmmu_hat_exit(hatlockp);
12911
12912	return (0);
12913}
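
/*
 * A sketch (not compiled) of how the two relocation callbacks above pair
 * up around a TSB page relocation; the flag and flavor names are the real
 * ones used above, while the surrounding driver is illustrative only:
 *
 *	(void) sfmmu_tsb_pre_relocator(va, sz, HAT_PRESUSPEND, tsbinfo);
 *		- sets TSB_RELOC_FLAG and invalidates the context(s)
 *	... the underlying page is relocated; tsb_va maps a new pfn ...
 *	(void) sfmmu_tsb_post_relocator(va, sz, HAT_POSTUNSUSPEND,
 *	    tsbinfo, newpfn);
 *		- rebuilds tsb_pa/tsb_tte via sfmmu_tsbinfo_setup_phys(),
 *		  clears TSB_RELOC_FLAG and wakes waiters on sfmmu_tsb_cv
 */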
12914
12915/*
12916 * Allocate and initialize a tsb_info structure.  Note that we may or may not
12917 * allocate a TSB here, depending on the flags passed in.
12918 */
12919static int
12920sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12921	uint_t flags, sfmmu_t *sfmmup)
12922{
12923	int err;
12924
12925	*tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12926	    sfmmu_tsbinfo_cache, KM_SLEEP);
12927
12928	if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12929	    tsb_szc, flags, sfmmup)) != 0) {
12930		kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12931		SFMMU_STAT(sf_tsb_allocfail);
12932		*tsbinfopp = NULL;
12933		return (err);
12934	}
12935	SFMMU_STAT(sf_tsb_alloc);
12936
	/*
	 * Bump the allocation counter for this TSB size; the
	 * sfmmu_tsbsize_stat structure is treated as an array of int
	 * counters indexed by TSB size code.
	 */
12940	(*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
12941	return (0);
12942}
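
/*
 * Illustrative (uncompiled) usage of sfmmu_tsbinfo_alloc(): a caller that
 * cannot tolerate failure requests the minimum size code together with
 * TSB_FORCEALLOC, e.g.
 *
 *	struct tsb_info *tsbinfop;
 *	(void) sfmmu_tsbinfo_alloc(&tsbinfop, TSB_MIN_SZCODE,
 *	    TSB8K|TSB64K|TSB512K, TSB_FORCEALLOC, sfmmup);
 *
 * which may sleep for memory but cannot return an error; see the comment
 * above sfmmu_init_tsbinfo() below.
 */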
12943
12944static void
12945sfmmu_tsb_free(struct tsb_info *tsbinfo)
12946{
12947	caddr_t tsbva = tsbinfo->tsb_va;
12948	uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12949	struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12950	vmem_t	*vmp = tsbinfo->tsb_vmp;
12951
12952	/*
12953	 * If we allocated this TSB from relocatable kernel memory, then we
12954	 * need to uninstall the callback handler.
12955	 */
12956	if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12957		uintptr_t slab_mask;
12958		caddr_t slab_vaddr;
12959		page_t **ppl;
12960		int ret;
12961
12962		ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12963		if (tsb_size > MMU_PAGESIZE4M)
12964			slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12965		else
12966			slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12967		slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12968
12969		ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12970		ASSERT(ret == 0);
12971		hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12972		    0, NULL);
12973		as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12974	}
12975
12976	if (kmem_cachep != NULL) {
12977		kmem_cache_free(kmem_cachep, tsbva);
12978	} else {
12979		vmem_xfree(vmp, (void *)tsbva, tsb_size);
12980	}
12981	tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12982	atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12983}
12984
12985static void
12986sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12987{
12988	if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12989		sfmmu_tsb_free(tsbinfo);
12990	}
	kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
}
12994
12995/*
12996 * Setup all the references to physical memory for this tsbinfo.
12997 * The underlying page(s) must be locked.
12998 */
12999static void
13000sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
13001{
13002	ASSERT(pfn != PFN_INVALID);
13003	ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
13004
13005#ifndef sun4v
13006	if (tsbinfo->tsb_szc == 0) {
13007		sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
13008		    PROT_WRITE|PROT_READ, TTE8K);
13009	} else {
13010		/*
13011		 * Round down PA and use a large mapping; the handlers will
13012		 * compute the TSB pointer at the correct offset into the
13013		 * big virtual page.  NOTE: this assumes all TSBs larger
13014		 * than 8K must come from physically contiguous slabs of
13015		 * size tsb_slab_size.
13016		 */
13017		sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
13018		    PROT_WRITE|PROT_READ, tsb_slab_ttesz);
13019	}
13020	tsbinfo->tsb_pa = ptob(pfn);
13021
13022	TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
13023	TTE_SET_MOD(&tsbinfo->tsb_tte);    /* enable writes */
13024
13025	ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
13026	ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
13027#else /* sun4v */
13028	tsbinfo->tsb_pa = ptob(pfn);
13029#endif /* sun4v */
13030}
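
/*
 * Worked example for the large-TSB case above (illustrative numbers):
 * with 8K base pages and 4M slabs, tsb_slab_mask is 0x1ff (512 pages - 1)
 * and tsb_slab_ttesz is TTE4M, so a 64K TSB at pfn 0x10038 gets a TTE for
 * pfn 0x10038 & ~0x1ff == 0x10000, and the TSB-miss handlers add the
 * offset within the 4M virtual page back in when computing TSB pointers.
 */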
13031
13032
13033/*
13034 * Returns zero on success, ENOMEM if over the high water mark,
13035 * or EAGAIN if the caller needs to retry with a smaller TSB
13036 * size (or specify TSB_FORCEALLOC if the allocation can't fail).
13037 *
13038 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
13039 * is specified and the TSB requested is PAGESIZE, though it
13040 * may sleep waiting for memory if sufficient memory is not
13041 * available.
13042 */
13043static int
13044sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
13045    int tsbcode, uint_t flags, sfmmu_t *sfmmup)
13046{
13047	caddr_t vaddr = NULL;
13048	caddr_t slab_vaddr;
13049	uintptr_t slab_mask;
13050	int tsbbytes = TSB_BYTES(tsbcode);
13051	int lowmem = 0;
13052	struct kmem_cache *kmem_cachep = NULL;
13053	vmem_t *vmp = NULL;
13054	lgrp_id_t lgrpid = LGRP_NONE;
13055	pfn_t pfn;
13056	uint_t cbflags = HAC_SLEEP;
13057	page_t **pplist;
13058	int ret;
13059
13060	ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
13061	if (tsbbytes > MMU_PAGESIZE4M)
13062		slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
13063	else
13064		slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
13065
13066	if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
13067		flags |= TSB_ALLOC;
13068
13069	ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
13070
13071	tsbinfo->tsb_sfmmu = sfmmup;
13072
13073	/*
13074	 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
13075	 * return.
13076	 */
13077	if ((flags & TSB_ALLOC) == 0) {
13078		tsbinfo->tsb_szc = tsbcode;
13079		tsbinfo->tsb_ttesz_mask = tteszmask;
13080		tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
13081		tsbinfo->tsb_pa = -1;
13082		tsbinfo->tsb_tte.ll = 0;
13083		tsbinfo->tsb_next = NULL;
13084		tsbinfo->tsb_flags = TSB_SWAPPED;
13085		tsbinfo->tsb_cache = NULL;
13086		tsbinfo->tsb_vmp = NULL;
13087		return (0);
13088	}
13089
13090#ifdef DEBUG
13091	/*
13092	 * For debugging:
13093	 * Randomly force allocation failures every tsb_alloc_mtbf
13094	 * tries if TSB_FORCEALLOC is not specified.  This will
13095	 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
13096	 * it is even, to allow testing of both failure paths...
13097	 */
13098	if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
13099	    (tsb_alloc_count++ == tsb_alloc_mtbf)) {
13100		tsb_alloc_count = 0;
13101		tsb_alloc_fail_mtbf++;
13102		return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
13103	}
13104#endif	/* DEBUG */
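
	/*
	 * Tuning sketch (illustrative): setting tsb_alloc_mtbf to an odd
	 * value on a DEBUG kernel, e.g. "set tsb_alloc_mtbf=101" in
	 * /etc/system, makes roughly every 101st non-forced allocation
	 * fail with ENOMEM, exercising that failure path; an even value
	 * exercises the EAGAIN path instead.
	 */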
13105
13106	/*
13107	 * Enforce high water mark if we are not doing a forced allocation
13108	 * and are not shrinking a process' TSB.
13109	 */
13110	if ((flags & TSB_SHRINK) == 0 &&
13111	    (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
13112		if ((flags & TSB_FORCEALLOC) == 0)
13113			return (ENOMEM);
13114		lowmem = 1;
13115	}
13116
13117	/*
13118	 * Allocate from the correct location based upon the size of the TSB
13119	 * compared to the base page size, and what memory conditions dictate.
13120	 * Note we always do nonblocking allocations from the TSB arena since
13121	 * we don't want memory fragmentation to cause processes to block
13122	 * indefinitely waiting for memory; until the kernel algorithms that
13123	 * coalesce large pages are improved this is our best option.
13124	 *
13125	 * Algorithm:
13126	 *	If allocating a "large" TSB (>8K), allocate from the
13127	 *		appropriate kmem_tsb_default_arena vmem arena
13128	 *	else if low on memory or the TSB_FORCEALLOC flag is set or
13129	 *	tsb_forceheap is set
13130	 *		Allocate from kernel heap via sfmmu_tsb8k_cache with
13131	 *		KM_SLEEP (never fails)
13132	 *	else
13133	 *		Allocate from appropriate sfmmu_tsb_cache with
13134	 *		KM_NOSLEEP
13135	 *	endif
13136	 */
13137	if (tsb_lgrp_affinity)
13138		lgrpid = lgrp_home_id(curthread);
13139	if (lgrpid == LGRP_NONE)
13140		lgrpid = 0;	/* use lgrp of boot CPU */
13141
13142	if (tsbbytes > MMU_PAGESIZE) {
13143		if (tsbbytes > MMU_PAGESIZE4M) {
13144			vmp = kmem_bigtsb_default_arena[lgrpid];
13145			vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
13146			    0, 0, NULL, NULL, VM_NOSLEEP);
13147		} else {
13148			vmp = kmem_tsb_default_arena[lgrpid];
13149			vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
13150			    0, 0, NULL, NULL, VM_NOSLEEP);
13151		}
13152#ifdef	DEBUG
13153	} else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
13154#else	/* !DEBUG */
13155	} else if (lowmem || (flags & TSB_FORCEALLOC)) {
13156#endif	/* DEBUG */
13157		kmem_cachep = sfmmu_tsb8k_cache;
13158		vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
13159		ASSERT(vaddr != NULL);
13160	} else {
13161		kmem_cachep = sfmmu_tsb_cache[lgrpid];
13162		vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
13163	}
13164
13165	tsbinfo->tsb_cache = kmem_cachep;
13166	tsbinfo->tsb_vmp = vmp;
13167
13168	if (vaddr == NULL) {
13169		return (EAGAIN);
13170	}
13171
13172	atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
13173	kmem_cachep = tsbinfo->tsb_cache;
13174
	/*
	 * If we are allocating from outside the cage, then we need to
	 * register a relocation callback handler.  Note that for now,
	 * since pseudo mappings always hang off the slab's root page,
	 * we need only lock the first 8K of the TSB slab.  This is a
	 * bit hacky but good for performance.
	 */
13182	if (kmem_cachep != sfmmu_tsb8k_cache) {
13183		slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
13184		ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
13185		ASSERT(ret == 0);
13186		ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
13187		    cbflags, (void *)tsbinfo, &pfn, NULL);
13188
		/*
		 * If we could not successfully add the callback, free up
		 * the resources and return an error condition.
		 */
13193		if (ret != 0) {
13194			if (kmem_cachep) {
13195				kmem_cache_free(kmem_cachep, vaddr);
13196			} else {
13197				vmem_xfree(vmp, (void *)vaddr, tsbbytes);
13198			}
13199			as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
13200			    S_WRITE);
13201			return (EAGAIN);
13202		}
13203	} else {
		/*
		 * Since allocation of 8K TSBs from the heap is rare and
		 * occurs during memory pressure, we allocate them from
		 * permanent memory rather than using callbacks to get
		 * the PFN.
		 */
13209		pfn = hat_getpfnum(kas.a_hat, vaddr);
13210	}
13211
13212	tsbinfo->tsb_va = vaddr;
13213	tsbinfo->tsb_szc = tsbcode;
13214	tsbinfo->tsb_ttesz_mask = tteszmask;
13215	tsbinfo->tsb_next = NULL;
13216	tsbinfo->tsb_flags = 0;
13217
13218	sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
13219
13220	sfmmu_inv_tsb(vaddr, tsbbytes);
13221
13222	if (kmem_cachep != sfmmu_tsb8k_cache) {
13223		as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
13224	}
13225
13226	return (0);
13227}
13228
13229/*
13230 * Initialize per cpu tsb and per cpu tsbmiss_area
13231 */
13232void
13233sfmmu_init_tsbs(void)
13234{
13235	int i;
13236	struct tsbmiss	*tsbmissp;
13237	struct kpmtsbm	*kpmtsbmp;
13238#ifndef sun4v
13239	extern int	dcache_line_mask;
13240#endif /* sun4v */
13241	extern uint_t	vac_colors;
13242
13243	/*
13244	 * Init. tsb miss area.
13245	 */
13246	tsbmissp = tsbmiss_area;
13247
13248	for (i = 0; i < NCPU; tsbmissp++, i++) {
13249		/*
13250		 * initialize the tsbmiss area.
13251		 * Do this for all possible CPUs as some may be added
13252		 * while the system is running. There is no cost to this.
13253		 */
13254		tsbmissp->ksfmmup = ksfmmup;
13255#ifndef sun4v
13256		tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
13257#endif /* sun4v */
13258		tsbmissp->khashstart =
13259		    (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
13260		tsbmissp->uhashstart =
13261		    (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
13262		tsbmissp->khashsz = khmehash_num;
13263		tsbmissp->uhashsz = uhmehash_num;
13264	}
13265
13266	sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
13267	    sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
13268
13269	if (kpm_enable == 0)
13270		return;
13271
13272	/* -- Begin KPM specific init -- */
13273
13274	if (kpm_smallpages) {
13275		/*
13276		 * If we're using base pagesize pages for seg_kpm
13277		 * mappings, we use the kernel TSB since we can't afford
13278		 * to allocate a second huge TSB for these mappings.
13279		 */
13280		kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13281		kpm_tsbsz = ktsb_szcode;
13282		kpmsm_tsbbase = kpm_tsbbase;
13283		kpmsm_tsbsz = kpm_tsbsz;
13284	} else {
13285		/*
13286		 * In VAC conflict case, just put the entries in the
13287		 * kernel 8K indexed TSB for now so we can find them.
13288		 * This could really be changed in the future if we feel
13289		 * the need...
13290		 */
13291		kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13292		kpmsm_tsbsz = ktsb_szcode;
13293		kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
13294		kpm_tsbsz = ktsb4m_szcode;
13295	}
13296
13297	kpmtsbmp = kpmtsbm_area;
13298	for (i = 0; i < NCPU; kpmtsbmp++, i++) {
13299		/*
13300		 * Initialize the kpmtsbm area.
13301		 * Do this for all possible CPUs as some may be added
13302		 * while the system is running. There is no cost to this.
13303		 */
13304		kpmtsbmp->vbase = kpm_vbase;
13305		kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
13306		kpmtsbmp->sz_shift = kpm_size_shift;
13307		kpmtsbmp->kpmp_shift = kpmp_shift;
13308		kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
13309		if (kpm_smallpages == 0) {
13310			kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
13311			kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
13312		} else {
13313			kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
13314			kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
13315		}
13316		kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
13317		kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
13318#ifdef	DEBUG
13319		kpmtsbmp->flags |= (kpm_tsbmtl) ?  KPMTSBM_TLTSBM_FLAG : 0;
13320#endif	/* DEBUG */
13321		if (ktsb_phys)
13322			kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
13323	}
13324
13325	/* -- End KPM specific init -- */
13326}
13327
/*
 * Statically allocated rather than via sfmmu_tsbinfo_alloc(), to avoid
 * kmem_alloc - no real reason beyond that.
 */
13329struct tsb_info ktsb_info[2];
13330
13331/*
13332 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
13333 */
13334void
13335sfmmu_init_ktsbinfo()
13336{
13337	ASSERT(ksfmmup != NULL);
13338	ASSERT(ksfmmup->sfmmu_tsb == NULL);
13339	/*
13340	 * Allocate tsbinfos for kernel and copy in data
13341	 * to make debug easier and sun4v setup easier.
13342	 */
13343	ktsb_info[0].tsb_sfmmu = ksfmmup;
13344	ktsb_info[0].tsb_szc = ktsb_szcode;
13345	ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
13346	ktsb_info[0].tsb_va = ktsb_base;
13347	ktsb_info[0].tsb_pa = ktsb_pbase;
13348	ktsb_info[0].tsb_flags = 0;
13349	ktsb_info[0].tsb_tte.ll = 0;
13350	ktsb_info[0].tsb_cache = NULL;
13351
13352	ktsb_info[1].tsb_sfmmu = ksfmmup;
13353	ktsb_info[1].tsb_szc = ktsb4m_szcode;
13354	ktsb_info[1].tsb_ttesz_mask = TSB4M;
13355	ktsb_info[1].tsb_va = ktsb4m_base;
13356	ktsb_info[1].tsb_pa = ktsb4m_pbase;
13357	ktsb_info[1].tsb_flags = 0;
13358	ktsb_info[1].tsb_tte.ll = 0;
13359	ktsb_info[1].tsb_cache = NULL;
13360
13361	/* Link them into ksfmmup. */
13362	ktsb_info[0].tsb_next = &ktsb_info[1];
13363	ktsb_info[1].tsb_next = NULL;
13364	ksfmmup->sfmmu_tsb = &ktsb_info[0];
13365
13366	sfmmu_setup_tsbinfo(ksfmmup);
13367}
13368
/*
 * Cache the last value returned from va_to_pa().  If the VA specified
 * in the current call to cached_va_to_pa() maps to the same page as the
 * previous call, compute the PA using the cached info; otherwise call
 * va_to_pa().
 *
 * Note: this function is neither MT-safe nor consistent in the presence
 * of multiple, interleaved threads.  It was created to enable an
 * optimization used during boot, at a point when there's only one thread
 * executing on the "boot CPU" and before startup_vm() has been called.
 */
13380static uint64_t
13381cached_va_to_pa(void *vaddr)
13382{
13383	static uint64_t prev_vaddr_base = 0;
13384	static uint64_t prev_pfn = 0;
13385
13386	if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13387		return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13388	} else {
13389		uint64_t pa = va_to_pa(vaddr);
13390
13391		if (pa != ((uint64_t)-1)) {
13392			/*
13393			 * Computed physical address is valid.  Cache its
13394			 * related info for the next cached_va_to_pa() call.
13395			 */
13396			prev_pfn = pa & MMU_PAGEMASK;
13397			prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13398		}
13399
13400		return (pa);
13401	}
13402}
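
/*
 * For example (illustrative), two consecutive lookups within one 8K page
 * cost a single va_to_pa() call:
 *
 *	pa0 = cached_va_to_pa(addr);		(calls va_to_pa())
 *	pa1 = cached_va_to_pa(addr + 0x40);	(cache hit within the page)
 *
 * which is why sfmmu_init_nucleus_hblks() below can translate each
 * hme_blk cheaply while carving up the nucleus area.
 */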
13403
13404/*
13405 * Carve up our nucleus hblk region.  We may allocate more hblks than
13406 * asked due to rounding errors but we are guaranteed to have at least
13407 * enough space to allocate the requested number of hblk8's and hblk1's.
13408 */
13409void
13410sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13411{
13412	struct hme_blk *hmeblkp;
13413	size_t hme8blk_sz, hme1blk_sz;
13414	size_t i;
13415	size_t hblk8_bound;
13416	ulong_t j = 0, k = 0;
13417
13418	ASSERT(addr != NULL && size != 0);
13419
13420	/* Need to use proper structure alignment */
13421	hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13422	hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13423
13424	nucleus_hblk8.list = (void *)addr;
13425	nucleus_hblk8.index = 0;
13426
13427	/*
13428	 * Use as much memory as possible for hblk8's since we
13429	 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13430	 * We need to hold back enough space for the hblk1's which
13431	 * we'll allocate next.
13432	 */
13433	hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
13434	for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13435		hmeblkp = (struct hme_blk *)addr;
13436		addr += hme8blk_sz;
13437		hmeblkp->hblk_nuc_bit = 1;
13438		hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13439	}
13440	nucleus_hblk8.len = j;
13441	ASSERT(j >= nhblk8);
13442	SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13443
13444	nucleus_hblk1.list = (void *)addr;
13445	nucleus_hblk1.index = 0;
13446	for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13447		hmeblkp = (struct hme_blk *)addr;
13448		addr += hme1blk_sz;
13449		hmeblkp->hblk_nuc_bit = 1;
13450		hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13451	}
13452	ASSERT(k >= nhblk1);
13453	nucleus_hblk1.len = k;
13454	SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13455}
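
/*
 * Illustrative arithmetic (hypothetical sizes): with size == 1M,
 * nhblk1 == 32 and hme1blk_sz == 64, hblk8_bound is 1M - 2048 -
 * hme8blk_sz, so the hblk8 loop consumes at most 1M - 2048 bytes and at
 * least nhblk1 * hme1blk_sz bytes always remain for the hblk1 loop that
 * follows; this is the guarantee claimed in the block comment above.
 */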
13456
13457/*
13458 * This function is currently not supported on this platform. For what
13459 * it's supposed to do, see hat.c and hat_srmmu.c
13460 */
13461/* ARGSUSED */
13462faultcode_t
13463hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13464    uint_t flags)
13465{
13466	ASSERT(hat->sfmmu_xhat_provider == NULL);
13467	return (FC_NOSUPPORT);
13468}
13469
/*
 * Searches the mapping list of the page for a mapping of the same size.  If
 * none is found, the corresponding bit is cleared in the p_index field.  When
 * large pages are more prevalent in the system, we can maintain the mapping
 * list in order and we won't have to traverse the list each time.  Just check
 * the next and prev entries, and if both are of a different size, clear the
 * bit.
 */
13477static void
13478sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13479{
13480	struct sf_hment *sfhmep;
13481	struct hme_blk *hmeblkp;
13482	int	index;
13483	pgcnt_t	npgs;
13484
13485	ASSERT(ttesz > TTE8K);
13486
13487	ASSERT(sfmmu_mlist_held(pp));
13488
13489	ASSERT(PP_ISMAPPED_LARGE(pp));
13490
	/*
	 * Traverse the mapping list looking for another mapping of the same
	 * size, since we only want to clear the index field if all mappings
	 * of that size are gone.
	 */

13497	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13498		if (IS_PAHME(sfhmep))
13499			continue;
13500		hmeblkp = sfmmu_hmetohblk(sfhmep);
13501		if (hmeblkp->hblk_xhat_bit)
13502			continue;
13503		if (hme_size(sfhmep) == ttesz) {
			/*
			 * Another mapping of the same size exists;
			 * don't clear the index.
			 */
13507			return;
13508		}
13509	}
13510
13511	/*
13512	 * Clear the p_index bit for large page.
13513	 */
13514	index = PAGESZ_TO_INDEX(ttesz);
13515	npgs = TTEPAGES(ttesz);
13516	while (npgs-- > 0) {
13517		ASSERT(pp->p_index & index);
13518		pp->p_index &= ~index;
13519		pp = PP_PAGENEXT(pp);
13520	}
13521}
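
/*
 * A sketch of the p_index bookkeeping above (bit positions illustrative):
 * PAGESZ_TO_INDEX(ttesz) yields one bit per page size, so mapping a 64K
 * page sets that bit in each of the TTEPAGES(TTE64K) == 8 constituent 8K
 * page_t's, and the loop above clears the same bit in each of them once
 * the last 64K mapping of the page is gone.
 */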
13522
13523/*
13524 * return supported features
13525 */
13526/* ARGSUSED */
13527int
13528hat_supported(enum hat_features feature, void *arg)
13529{
13530	switch (feature) {
13531	case    HAT_SHARED_PT:
13532	case	HAT_DYNAMIC_ISM_UNMAP:
13533	case	HAT_VMODSORT:
13534		return (1);
13535	case	HAT_SHARED_REGIONS:
13536		if (shctx_on)
13537			return (1);
13538		else
13539			return (0);
13540	default:
13541		return (0);
13542	}
13543}
13544
13545void
13546hat_enter(struct hat *hat)
13547{
13548	hatlock_t	*hatlockp;
13549
13550	if (hat != ksfmmup) {
13551		hatlockp = TSB_HASH(hat);
13552		mutex_enter(HATLOCK_MUTEXP(hatlockp));
13553	}
13554}
13555
13556void
13557hat_exit(struct hat *hat)
13558{
13559	hatlock_t	*hatlockp;
13560
13561	if (hat != ksfmmup) {
13562		hatlockp = TSB_HASH(hat);
13563		mutex_exit(HATLOCK_MUTEXP(hatlockp));
13564	}
13565}
13566
13567/*ARGSUSED*/
13568void
13569hat_reserve(struct as *as, caddr_t addr, size_t len)
13570{
13571}
13572
13573static void
13574hat_kstat_init(void)
13575{
13576	kstat_t *ksp;
13577
13578	ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13579	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13580	    KSTAT_FLAG_VIRTUAL);
13581	if (ksp) {
13582		ksp->ks_data = (void *) &sfmmu_global_stat;
13583		kstat_install(ksp);
13584	}
13585	ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13586	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13587	    KSTAT_FLAG_VIRTUAL);
13588	if (ksp) {
13589		ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13590		kstat_install(ksp);
13591	}
13592	ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13593	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13594	    KSTAT_FLAG_WRITABLE);
13595	if (ksp) {
13596		ksp->ks_update = sfmmu_kstat_percpu_update;
13597		kstat_install(ksp);
13598	}
13599}
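
/*
 * These raw kstats can be inspected from userland, e.g. (illustrative):
 *
 *	kstat -n sfmmu_global_stat
 *	kstat -n sfmmu_percpu_stat
 *
 * and sfmmu_percpu_stat is writable: writing it clears the per-cpu
 * counters via sfmmu_kstat_percpu_update() below.
 */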
13600
13601/* ARGSUSED */
13602static int
13603sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13604{
13605	struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13606	struct tsbmiss *tsbm = tsbmiss_area;
13607	struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13608	int i;
13609
13610	ASSERT(cpu_kstat);
13611	if (rw == KSTAT_READ) {
13612		for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13613			cpu_kstat->sf_itlb_misses = 0;
13614			cpu_kstat->sf_dtlb_misses = 0;
13615			cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13616			    tsbm->uprot_traps;
13617			cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13618			    kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13619			cpu_kstat->sf_tsb_hits = 0;
13620			cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13621			cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13622		}
13623	} else {
13624		/* KSTAT_WRITE is used to clear stats */
13625		for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13626			tsbm->utsb_misses = 0;
13627			tsbm->ktsb_misses = 0;
13628			tsbm->uprot_traps = 0;
13629			tsbm->kprot_traps = 0;
13630			kpmtsbm->kpm_dtlb_misses = 0;
13631			kpmtsbm->kpm_tsb_misses = 0;
13632		}
13633	}
13634	return (0);
13635}
13636
13637#ifdef	DEBUG
13638
13639tte_t  *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13640
13641/*
13642 * A tte checker. *orig_old is the value we read before cas.
13643 *	*cur is the value returned by cas.
13644 *	*new is the desired value when we do the cas.
13645 *
13646 *	*hmeblkp is currently unused.
13647 */
13648
13649/* ARGSUSED */
13650void
13651chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13652{
13653	pfn_t i, j, k;
13654	int cpuid = CPU->cpu_id;
13655
13656	gorig[cpuid] = orig_old;
13657	gcur[cpuid] = cur;
13658	gnew[cpuid] = new;
13659
13660#ifdef lint
13661	hmeblkp = hmeblkp;
13662#endif
13663
13664	if (TTE_IS_VALID(orig_old)) {
13665		if (TTE_IS_VALID(cur)) {
13666			i = TTE_TO_TTEPFN(orig_old);
13667			j = TTE_TO_TTEPFN(cur);
13668			k = TTE_TO_TTEPFN(new);
13669			if (i != j) {
13670				/* remap error? */
13671				panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13672			}
13673
13674			if (i != k) {
13675				/* remap error? */
13676				panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13677			}
13678		} else {
13679			if (TTE_IS_VALID(new)) {
				panic("chk_tte: invalid cur?");
13681			}
13682
13683			i = TTE_TO_TTEPFN(orig_old);
13684			k = TTE_TO_TTEPFN(new);
13685			if (i != k) {
13686				panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13687			}
13688		}
13689	} else {
13690		if (TTE_IS_VALID(cur)) {
13691			j = TTE_TO_TTEPFN(cur);
13692			if (TTE_IS_VALID(new)) {
13693				k = TTE_TO_TTEPFN(new);
13694				if (j != k) {
13695					panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13696					    j, k);
13697				}
13698			} else {
13699				panic("chk_tte: why here?");
13700			}
13701		} else {
13702			if (!TTE_IS_VALID(new)) {
				panic("chk_tte: why here2?");
13704			}
13705		}
13706	}
13707}
13708
13709#endif /* DEBUG */
13710
13711extern void prefetch_tsbe_read(struct tsbe *);
13712extern void prefetch_tsbe_write(struct tsbe *);
13713
13714
/*
 * We want to prefetch 7 cache lines ahead for our read prefetch.  This gives
 * us optimal performance on Cheetah+.  You can only have 8 outstanding
 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
 * prefetch to make the best use of the prefetch capability.
 */
13721#define	TSBE_PREFETCH_STRIDE (7)
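
/*
 * In other words (assuming the line offset is applied inside
 * prefetch_tsbe_read() itself), the copy loop below issues one read
 * prefetch every 4 entries, each targeting data 7 P$ lines (28 entries)
 * ahead, and stops prefetching at old_entries - 4 * (7 + 1) entries so
 * that no prefetch reaches past the end of the old TSB.
 */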
13722
13723void
13724sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13725{
13726	int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13727	int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13728	int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13729	int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13730	struct tsbe *old;
13731	struct tsbe *new;
13732	struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13733	uint64_t va;
13734	int new_offset;
13735	int i;
13736	int vpshift;
13737	int last_prefetch;
13738
13739	if (old_bytes == new_bytes) {
13740		bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13741	} else {
13742
		/*
		 * A TSBE is 16 bytes, which means there are four TSBEs per
		 * P$ line (64 bytes); thus we prefetch once every 4 TSBEs.
		 */
13747		old = (struct tsbe *)old_tsbinfo->tsb_va;
13748		last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13749		for (i = 0; i < old_entries; i++, old++) {
13750			if (((i & (4-1)) == 0) && (i < last_prefetch))
13751				prefetch_tsbe_read(old);
13752			if (!old->tte_tag.tag_invalid) {
13753				/*
13754				 * We have a valid TTE to remap.  Check the
13755				 * size.  We won't remap 64K or 512K TTEs
13756				 * because they span more than one TSB entry
13757				 * and are indexed using an 8K virt. page.
13758				 * Ditto for 32M and 256M TTEs.
13759				 */
13760				if (TTE_CSZ(&old->tte_data) == TTE64K ||
13761				    TTE_CSZ(&old->tte_data) == TTE512K)
13762					continue;
13763				if (mmu_page_sizes == max_mmu_page_sizes) {
13764					if (TTE_CSZ(&old->tte_data) == TTE32M ||
13765					    TTE_CSZ(&old->tte_data) == TTE256M)
13766						continue;
13767				}
13768
13769				/* clear the lower 22 bits of the va */
13770				va = *(uint64_t *)old << 22;
13771				/* turn va into a virtual pfn */
13772				va >>= 22 - TSB_START_SIZE;
13773				/*
13774				 * or in bits from the offset in the tsb
13775				 * to get the real virtual pfn. These
13776				 * correspond to bits [21:13] in the va
13777				 */
13778				vpshift =
13779				    TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13780				    0x1ff;
13781				va |= (i << vpshift);
13782				va >>= vpshift;
13783				new_offset = va & (new_entries - 1);
13784				new = new_base + new_offset;
13785				prefetch_tsbe_write(new);
13786				*new = *old;
13787			}
13788		}
13789	}
13790}
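
/*
 * A sketch of the remap math above (illustrative): the tag half of a TSBE
 * holds the upper VA bits but not va[21:13], which are implied by the
 * entry's position in the TSB.  The loop therefore rebuilds a full
 * "virtual pfn" by shifting the tag into place and or'ing the old index i
 * back in at vpshift, then re-indexes with new_offset = vpfn &
 * (new_entries - 1) to pick the slot in the new TSB.
 */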
13791
13792/*
13793 * unused in sfmmu
13794 */
13795void
13796hat_dump(void)
13797{
13798}
13799
13800/*
13801 * Called when a thread is exiting and we have switched to the kernel address
13802 * space.  Perform the same VM initialization resume() uses when switching
13803 * processes.
13804 *
13805 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13806 * we call it anyway in case the semantics change in the future.
13807 */
13808/*ARGSUSED*/
13809void
13810hat_thread_exit(kthread_t *thd)
13811{
13812	uint_t pgsz_cnum;
13813	uint_t pstate_save;
13814
13815	ASSERT(thd->t_procp->p_as == &kas);
13816
13817	pgsz_cnum = KCONTEXT;
13818#ifdef sun4u
13819	pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13820#endif
13821
13822	/*
13823	 * Note that sfmmu_load_mmustate() is currently a no-op for
13824	 * kernel threads. We need to disable interrupts here,
13825	 * simply because otherwise sfmmu_load_mmustate() would panic
13826	 * if the caller does not disable interrupts.
13827	 */
13828	pstate_save = sfmmu_disable_intrs();
13829
13830	/* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13831	sfmmu_setctx_sec(pgsz_cnum);
13832	sfmmu_load_mmustate(ksfmmup);
13833	sfmmu_enable_intrs(pstate_save);
13834}
13835
13836
13837/*
13838 * SRD support
13839 */
13840#define	SRD_HASH_FUNCTION(vp)	(((((uintptr_t)(vp)) >> 4) ^ \
13841				    (((uintptr_t)(vp)) >> 11)) & \
13842				    srd_hashmask)
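
/*
 * The hash xors two differently shifted copies of the vnode pointer so
 * that pointers differing only in a few bits still spread across the
 * buckets; srd_hashmask sizes the result to the srd_buckets array.
 */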
13843
13844/*
13845 * Attach the process to the srd struct associated with the exec vnode
13846 * from which the process is started.
13847 */
13848void
13849hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13850{
13851	uint_t hash = SRD_HASH_FUNCTION(evp);
13852	sf_srd_t *srdp;
13853	sf_srd_t *newsrdp;
13854
13855	ASSERT(sfmmup != ksfmmup);
13856	ASSERT(sfmmup->sfmmu_srdp == NULL);
13857
13858	if (!shctx_on) {
13859		return;
13860	}
13861
13862	VN_HOLD(evp);
13863
13864	if (srd_buckets[hash].srdb_srdp != NULL) {
13865		mutex_enter(&srd_buckets[hash].srdb_lock);
13866		for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13867		    srdp = srdp->srd_hash) {
13868			if (srdp->srd_evp == evp) {
13869				ASSERT(srdp->srd_refcnt >= 0);
13870				sfmmup->sfmmu_srdp = srdp;
13871				atomic_add_32(
13872				    (volatile uint_t *)&srdp->srd_refcnt, 1);
13873				mutex_exit(&srd_buckets[hash].srdb_lock);
13874				return;
13875			}
13876		}
13877		mutex_exit(&srd_buckets[hash].srdb_lock);
13878	}
13879	newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13880	ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13881
13882	newsrdp->srd_evp = evp;
13883	newsrdp->srd_refcnt = 1;
13884	newsrdp->srd_hmergnfree = NULL;
13885	newsrdp->srd_ismrgnfree = NULL;
13886
13887	mutex_enter(&srd_buckets[hash].srdb_lock);
13888	for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13889	    srdp = srdp->srd_hash) {
13890		if (srdp->srd_evp == evp) {
13891			ASSERT(srdp->srd_refcnt >= 0);
13892			sfmmup->sfmmu_srdp = srdp;
13893			atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
13894			mutex_exit(&srd_buckets[hash].srdb_lock);
13895			kmem_cache_free(srd_cache, newsrdp);
13896			return;
13897		}
13898	}
13899	newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13900	srd_buckets[hash].srdb_srdp = newsrdp;
13901	sfmmup->sfmmu_srdp = newsrdp;
13902
	mutex_exit(&srd_buckets[hash].srdb_lock);
}
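
/*
 * Note the lookup/allocate/re-lookup pattern above: the bucket is
 * searched, the lock is dropped, a new srd is allocated with KM_SLEEP,
 * and the bucket is searched again under srdb_lock before insertion,
 * since another process sharing the same exec vnode may have inserted
 * an srd while we slept in the allocator.
 */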
13906
13907static void
13908sfmmu_leave_srd(sfmmu_t *sfmmup)
13909{
13910	vnode_t *evp;
13911	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13912	uint_t hash;
13913	sf_srd_t **prev_srdpp;
13914	sf_region_t *rgnp;
13915	sf_region_t *nrgnp;
13916#ifdef DEBUG
13917	int rgns = 0;
13918#endif
13919	int i;
13920
13921	ASSERT(sfmmup != ksfmmup);
13922	ASSERT(srdp != NULL);
13923	ASSERT(srdp->srd_refcnt > 0);
13924	ASSERT(sfmmup->sfmmu_scdp == NULL);
13925	ASSERT(sfmmup->sfmmu_free == 1);
13926
13927	sfmmup->sfmmu_srdp = NULL;
13928	evp = srdp->srd_evp;
13929	ASSERT(evp != NULL);
13930	if (atomic_add_32_nv(
13931	    (volatile uint_t *)&srdp->srd_refcnt, -1)) {
13932		VN_RELE(evp);
13933		return;
13934	}
13935
13936	hash = SRD_HASH_FUNCTION(evp);
13937	mutex_enter(&srd_buckets[hash].srdb_lock);
13938	for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13939	    (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13940		if (srdp->srd_evp == evp) {
13941			break;
13942		}
13943	}
13944	if (srdp == NULL || srdp->srd_refcnt) {
13945		mutex_exit(&srd_buckets[hash].srdb_lock);
13946		VN_RELE(evp);
13947		return;
13948	}
13949	*prev_srdpp = srdp->srd_hash;
13950	mutex_exit(&srd_buckets[hash].srdb_lock);
13951
13952	ASSERT(srdp->srd_refcnt == 0);
13953	VN_RELE(evp);
13954
13955#ifdef DEBUG
13956	for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13957		ASSERT(srdp->srd_rgnhash[i] == NULL);
13958	}
13959#endif /* DEBUG */
13960
	/* free each hme region in the srd */
13962	for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13963		nrgnp = rgnp->rgn_next;
13964		ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13965		ASSERT(rgnp->rgn_refcnt == 0);
13966		ASSERT(rgnp->rgn_sfmmu_head == NULL);
13967		ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13968		ASSERT(rgnp->rgn_hmeflags == 0);
13969		ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13970#ifdef DEBUG
13971		for (i = 0; i < MMU_PAGE_SIZES; i++) {
13972			ASSERT(rgnp->rgn_ttecnt[i] == 0);
13973		}
13974		rgns++;
13975#endif /* DEBUG */
13976		kmem_cache_free(region_cache, rgnp);
13977	}
13978	ASSERT(rgns == srdp->srd_next_hmerid);
13979
13980#ifdef DEBUG
13981	rgns = 0;
13982#endif
	/* free each ism region in the srd */
13984	for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13985		nrgnp = rgnp->rgn_next;
13986		ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13987		ASSERT(rgnp->rgn_refcnt == 0);
13988		ASSERT(rgnp->rgn_sfmmu_head == NULL);
13989		ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13990		ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13991#ifdef DEBUG
13992		for (i = 0; i < MMU_PAGE_SIZES; i++) {
13993			ASSERT(rgnp->rgn_ttecnt[i] == 0);
13994		}
13995		rgns++;
13996#endif /* DEBUG */
13997		kmem_cache_free(region_cache, rgnp);
13998	}
13999	ASSERT(rgns == srdp->srd_next_ismrid);
14000	ASSERT(srdp->srd_ismbusyrgns == 0);
14001	ASSERT(srdp->srd_hmebusyrgns == 0);
14002
14003	srdp->srd_next_ismrid = 0;
14004	srdp->srd_next_hmerid = 0;
14005
14006	bzero((void *)srdp->srd_ismrgnp,
14007	    sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
14008	bzero((void *)srdp->srd_hmergnp,
14009	    sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
14010
14011	ASSERT(srdp->srd_scdp == NULL);
14012	kmem_cache_free(srd_cache, srdp);
14013}
14014
14015/* ARGSUSED */
14016static int
14017sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
14018{
14019	sf_srd_t *srdp = (sf_srd_t *)buf;
14020	bzero(buf, sizeof (*srdp));
14021
14022	mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
14023	mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
14024	return (0);
14025}
14026
14027/* ARGSUSED */
14028static void
14029sfmmu_srdcache_destructor(void *buf, void *cdrarg)
14030{
14031	sf_srd_t *srdp = (sf_srd_t *)buf;
14032
14033	mutex_destroy(&srdp->srd_mutex);
14034	mutex_destroy(&srdp->srd_scd_mutex);
14035}
14036
14037/*
14038 * The caller makes sure hat_join_region()/hat_leave_region() can't be called
14039 * at the same time for the same process and address range. This is ensured by
14040 * the fact that address space is locked as writer when a process joins the
14041 * regions. Therefore there's no need to hold an srd lock during the entire
14042 * execution of hat_join_region()/hat_leave_region().
14043 */
14044
14045#define	RGN_HASH_FUNCTION(obj)	(((((uintptr_t)(obj)) >> 4) ^ \
14046				    (((uintptr_t)(obj)) >> 11)) & \
14047					srd_rgn_hashmask)
14048/*
14049 * This routine implements the shared context functionality required when
14050 * attaching a segment to an address space. It must be called from
14051 * hat_share() for D(ISM) segments and from segvn_create() for segments
14052 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
14053 * which is saved in the private segment data for hme segments and
14054 * the ism_map structure for ism segments.
14055 */
14056hat_region_cookie_t
14057hat_join_region(struct hat *sfmmup,
14058	caddr_t r_saddr,
14059	size_t r_size,
14060	void *r_obj,
14061	u_offset_t r_objoff,
14062	uchar_t r_perm,
14063	uchar_t r_pgszc,
14064	hat_rgn_cb_func_t r_cb_function,
14065	uint_t flags)
14066{
14067	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14068	uint_t rhash;
14069	uint_t rid;
14070	hatlock_t *hatlockp;
14071	sf_region_t *rgnp;
14072	sf_region_t *new_rgnp = NULL;
14073	int i;
14074	uint16_t *nextidp;
14075	sf_region_t **freelistp;
14076	int maxids;
14077	sf_region_t **rarrp;
14078	uint16_t *busyrgnsp;
14079	ulong_t rttecnt;
14080	uchar_t tteflag;
14081	uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14082	int text = (r_type == HAT_REGION_TEXT);
14083
14084	if (srdp == NULL || r_size == 0) {
14085		return (HAT_INVALID_REGION_COOKIE);
14086	}
14087
14088	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14089	ASSERT(sfmmup != ksfmmup);
14090	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14091	ASSERT(srdp->srd_refcnt > 0);
14092	ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14093	ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14094	ASSERT(r_pgszc < mmu_page_sizes);
14095	if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
14096	    !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
14097		panic("hat_join_region: region addr or size is not aligned\n");
	}

14101	r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14102	    SFMMU_REGION_HME;
	/*
	 * Currently we only support shared hmes for the read-only main
	 * text region.
	 */
14107	if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
14108	    (r_perm & PROT_WRITE))) {
14109		return (HAT_INVALID_REGION_COOKIE);
14110	}
14111
14112	rhash = RGN_HASH_FUNCTION(r_obj);
14113
14114	if (r_type == SFMMU_REGION_ISM) {
14115		nextidp = &srdp->srd_next_ismrid;
14116		freelistp = &srdp->srd_ismrgnfree;
14117		maxids = SFMMU_MAX_ISM_REGIONS;
14118		rarrp = srdp->srd_ismrgnp;
14119		busyrgnsp = &srdp->srd_ismbusyrgns;
14120	} else {
14121		nextidp = &srdp->srd_next_hmerid;
14122		freelistp = &srdp->srd_hmergnfree;
14123		maxids = SFMMU_MAX_HME_REGIONS;
14124		rarrp = srdp->srd_hmergnp;
14125		busyrgnsp = &srdp->srd_hmebusyrgns;
14126	}
14127
14128	mutex_enter(&srdp->srd_mutex);
14129
14130	for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14131	    rgnp = rgnp->rgn_hash) {
14132		if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
14133		    rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
14134		    rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
14135			break;
14136		}
14137	}
14138
14139rfound:
14140	if (rgnp != NULL) {
14141		ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14142		ASSERT(rgnp->rgn_cb_function == r_cb_function);
14143		ASSERT(rgnp->rgn_refcnt >= 0);
14144		rid = rgnp->rgn_id;
14145		ASSERT(rid < maxids);
14146		ASSERT(rarrp[rid] == rgnp);
14147		ASSERT(rid < *nextidp);
14148		atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);
14149		mutex_exit(&srdp->srd_mutex);
14150		if (new_rgnp != NULL) {
14151			kmem_cache_free(region_cache, new_rgnp);
14152		}
14153		if (r_type == SFMMU_REGION_HME) {
14154			int myjoin =
14155			    (sfmmup == astosfmmu(curthread->t_procp->p_as));
14156
14157			sfmmu_link_to_hmeregion(sfmmup, rgnp);
			/*
			 * The bitmap should be updated after linking the
			 * sfmmu onto the region list so that pageunload()
			 * doesn't skip the TSB/TLB flush.  As soon as the
			 * bitmap is updated, another thread in this process
			 * can start accessing this region.
			 */
			/*
			 * Normally ttecnt accounting is done as part of
			 * pagefault handling.  But a process may not take
			 * any pagefaults on shared hmeblks created by some
			 * other process.  To compensate for this, assume
			 * that the entire region will end up faulted in
			 * using the region's pagesize.
			 */
14174			if (r_pgszc > TTE8K) {
14175				tteflag = 1 << r_pgszc;
14176				if (disable_large_pages & tteflag) {
14177					tteflag = 0;
14178				}
14179			} else {
14180				tteflag = 0;
14181			}
14182			if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
14183				hatlockp = sfmmu_hat_enter(sfmmup);
14184				sfmmup->sfmmu_rtteflags |= tteflag;
14185				sfmmu_hat_exit(hatlockp);
14186			}
14187			hatlockp = sfmmu_hat_enter(sfmmup);
14188
14189			/*
14190			 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M
14191			 * region to allow for large page allocation failure.
14192			 */
14193			if (r_pgszc >= TTE4M) {
14194				sfmmup->sfmmu_tsb0_4minflcnt +=
14195				    r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14196			}
14197
14198			/* update sfmmu_ttecnt with the shme rgn ttecnt */
14199			rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14200			atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14201			    rttecnt);
14202
14203			if (text && r_pgszc >= TTE4M &&
14204			    (tteflag || ((disable_large_pages >> TTE4M) &
14205			    ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
14206			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
14207				SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
14208			}
14209
14210			sfmmu_hat_exit(hatlockp);
14211			/*
14212			 * On Panther we need to make sure TLB is programmed
14213			 * to accept 32M/256M pages.  Call
14214			 * sfmmu_check_page_sizes() now to make sure TLB is
14215			 * setup before making hmeregions visible to other
14216			 * threads.
14217			 */
14218			sfmmu_check_page_sizes(sfmmup, 1);
14219			hatlockp = sfmmu_hat_enter(sfmmup);
14220			SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14221
			/*
			 * If the context is invalid, the tsb miss
			 * exception code will call sfmmu_check_page_sizes()
			 * and update the tsbmiss area later.
			 */
14227			kpreempt_disable();
14228			if (myjoin &&
14229			    (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
14230			    != INVALID_CONTEXT)) {
14231				struct tsbmiss *tsbmp;
14232
14233				tsbmp = &tsbmiss_area[CPU->cpu_id];
14234				ASSERT(sfmmup == tsbmp->usfmmup);
14235				BT_SET(tsbmp->shmermap, rid);
14236				if (r_pgszc > TTE64K) {
14237					tsbmp->uhat_rtteflags |= tteflag;
14238				}
14239
14240			}
14241			kpreempt_enable();
14242
14243			sfmmu_hat_exit(hatlockp);
14244			ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
14245			    HAT_INVALID_REGION_COOKIE);
14246		} else {
14247			hatlockp = sfmmu_hat_enter(sfmmup);
14248			SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
14249			sfmmu_hat_exit(hatlockp);
14250		}
14251		ASSERT(rid < maxids);
14252
14253		if (r_type == SFMMU_REGION_ISM) {
14254			sfmmu_find_scd(sfmmup);
14255		}
14256		return ((hat_region_cookie_t)((uint64_t)rid));
14257	}
14258
14259	ASSERT(new_rgnp == NULL);
14260
14261	if (*busyrgnsp >= maxids) {
14262		mutex_exit(&srdp->srd_mutex);
14263		return (HAT_INVALID_REGION_COOKIE);
14264	}
14265
14266	ASSERT(MUTEX_HELD(&srdp->srd_mutex));
14267	if (*freelistp != NULL) {
14268		rgnp = *freelistp;
14269		*freelistp = rgnp->rgn_next;
14270		ASSERT(rgnp->rgn_id < *nextidp);
14271		ASSERT(rgnp->rgn_id < maxids);
14272		ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
14273		ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
14274		    == r_type);
14275		ASSERT(rarrp[rgnp->rgn_id] == rgnp);
14276		ASSERT(rgnp->rgn_hmeflags == 0);
14277	} else {
14278		/*
14279		 * release local locks before memory allocation.
14280		 */
14281		mutex_exit(&srdp->srd_mutex);
14282
14283		new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
14284
14285		mutex_enter(&srdp->srd_mutex);
14286		for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14287		    rgnp = rgnp->rgn_hash) {
14288			if (rgnp->rgn_saddr == r_saddr &&
14289			    rgnp->rgn_size == r_size &&
14290			    rgnp->rgn_obj == r_obj &&
14291			    rgnp->rgn_objoff == r_objoff &&
14292			    rgnp->rgn_perm == r_perm &&
14293			    rgnp->rgn_pgszc == r_pgszc) {
14294				break;
14295			}
14296		}
14297		if (rgnp != NULL) {
14298			goto rfound;
14299		}
14300
14301		if (*nextidp >= maxids) {
14302			mutex_exit(&srdp->srd_mutex);
14303			goto fail;
14304		}
14305		rgnp = new_rgnp;
14306		new_rgnp = NULL;
14307		rgnp->rgn_id = (*nextidp)++;
14308		ASSERT(rgnp->rgn_id < maxids);
14309		ASSERT(rarrp[rgnp->rgn_id] == NULL);
14310		rarrp[rgnp->rgn_id] = rgnp;
14311	}
14312
14313	ASSERT(rgnp->rgn_sfmmu_head == NULL);
14314	ASSERT(rgnp->rgn_hmeflags == 0);
14315#ifdef DEBUG
14316	for (i = 0; i < MMU_PAGE_SIZES; i++) {
14317		ASSERT(rgnp->rgn_ttecnt[i] == 0);
14318	}
14319#endif
14320	rgnp->rgn_saddr = r_saddr;
14321	rgnp->rgn_size = r_size;
14322	rgnp->rgn_obj = r_obj;
14323	rgnp->rgn_objoff = r_objoff;
14324	rgnp->rgn_perm = r_perm;
14325	rgnp->rgn_pgszc = r_pgszc;
14326	rgnp->rgn_flags = r_type;
14327	rgnp->rgn_refcnt = 0;
14328	rgnp->rgn_cb_function = r_cb_function;
14329	rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
14330	srdp->srd_rgnhash[rhash] = rgnp;
14331	(*busyrgnsp)++;
14332	ASSERT(*busyrgnsp <= maxids);
14333	goto rfound;
14334
14335fail:
14336	ASSERT(new_rgnp != NULL);
14337	kmem_cache_free(region_cache, new_rgnp);
14338	return (HAT_INVALID_REGION_COOKIE);
14339}
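
/*
 * The region cookie returned above is simply the region id widened to the
 * cookie type, i.e. (hat_region_cookie_t)((uint64_t)rid); callers such as
 * segvn treat it as opaque and hand it back to hat_leave_region() and
 * hat_dup_region() below, which recover the rid by the reverse cast.
 */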
14340
/*
 * This function implements the shared context functionality required
 * when detaching a segment from an address space.  It must be called
 * from hat_unshare() for all D(ISM) segments and from segvn_unmap()
 * for segments with a valid region_cookie.
 * It will also be called from all seg_vn routines which change a
 * segment's attributes, such as segvn_setprot(), segvn_setpagesize(),
 * segvn_clrszc() and segvn_advise(), as well as in the case of a COW
 * fault from segvn_fault().
 */
14351void
14352hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14353{
14354	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14355	sf_scd_t *scdp;
14356	uint_t rhash;
14357	uint_t rid = (uint_t)((uint64_t)rcookie);
14358	hatlock_t *hatlockp = NULL;
14359	sf_region_t *rgnp;
14360	sf_region_t **prev_rgnpp;
14361	sf_region_t *cur_rgnp;
14362	void *r_obj;
14363	int i;
14364	caddr_t	r_saddr;
14365	caddr_t r_eaddr;
14366	size_t	r_size;
14367	uchar_t	r_pgszc;
14368	uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14369
14370	ASSERT(sfmmup != ksfmmup);
14371	ASSERT(srdp != NULL);
14372	ASSERT(srdp->srd_refcnt > 0);
14373	ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14374	ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14375	ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
14376
14377	r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14378	    SFMMU_REGION_HME;
14379
14380	if (r_type == SFMMU_REGION_ISM) {
14381		ASSERT(SFMMU_IS_ISMRID_VALID(rid));
14382		ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
14383		rgnp = srdp->srd_ismrgnp[rid];
14384	} else {
14385		ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14386		ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14387		rgnp = srdp->srd_hmergnp[rid];
14388	}
14389	ASSERT(rgnp != NULL);
14390	ASSERT(rgnp->rgn_id == rid);
14391	ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14392	ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14393	ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14394
14395	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14396	if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
14397		xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
14398		    rgnp->rgn_size, 0, NULL);
14399	}
14400
14401	if (sfmmup->sfmmu_free) {
14402		ulong_t rttecnt;
14403		r_pgszc = rgnp->rgn_pgszc;
14404		r_size = rgnp->rgn_size;
14405
14406		ASSERT(sfmmup->sfmmu_scdp == NULL);
14407		if (r_type == SFMMU_REGION_ISM) {
14408			SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14409		} else {
14410			/* update shme rgns ttecnt in sfmmu_ttecnt */
14411			rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14412			ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14413
14414			atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14415			    -rttecnt);
14416
14417			SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14418		}
14419	} else if (r_type == SFMMU_REGION_ISM) {
14420		hatlockp = sfmmu_hat_enter(sfmmup);
14421		ASSERT(rid < srdp->srd_next_ismrid);
14422		SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14423		scdp = sfmmup->sfmmu_scdp;
14424		if (scdp != NULL &&
14425		    SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14426			sfmmu_leave_scd(sfmmup, r_type);
14427			ASSERT(sfmmu_hat_lock_held(sfmmup));
14428		}
14429		sfmmu_hat_exit(hatlockp);
14430	} else {
14431		ulong_t rttecnt;
14432		r_pgszc = rgnp->rgn_pgszc;
14433		r_saddr = rgnp->rgn_saddr;
14434		r_size = rgnp->rgn_size;
14435		r_eaddr = r_saddr + r_size;
14436
14437		ASSERT(r_type == SFMMU_REGION_HME);
14438		hatlockp = sfmmu_hat_enter(sfmmup);
14439		ASSERT(rid < srdp->srd_next_hmerid);
14440		SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14441
14442		/*
14443		 * If region is part of an SCD call sfmmu_leave_scd().
14444		 * Otherwise if process is not exiting and has valid context
14445		 * just drop the context on the floor to lose stale TLB
14446		 * entries and force the update of tsb miss area to reflect
14447		 * the new region map. After that clean our TSB entries.
14448		 */
14449		scdp = sfmmup->sfmmu_scdp;
14450		if (scdp != NULL &&
14451		    SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14452			sfmmu_leave_scd(sfmmup, r_type);
14453			ASSERT(sfmmu_hat_lock_held(sfmmup));
14454		}
14455		sfmmu_invalidate_ctx(sfmmup);
14456
14457		i = TTE8K;
14458		while (i < mmu_page_sizes) {
14459			if (rgnp->rgn_ttecnt[i] != 0) {
14460				sfmmu_unload_tsb_range(sfmmup, r_saddr,
14461				    r_eaddr, i);
14462				if (i < TTE4M) {
14463					i = TTE4M;
14464					continue;
14465				} else {
14466					break;
14467				}
14468			}
14469			i++;
14470		}
14471		/* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14472		if (r_pgszc >= TTE4M) {
14473			rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14474			ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14475			    rttecnt);
14476			sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14477		}
14478
14479		/* update shme rgns ttecnt in sfmmu_ttecnt */
14480		rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14481		ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14482		atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14483
14484		sfmmu_hat_exit(hatlockp);
14485		if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14486			/* sfmmup left the scd, grow private tsb */
14487			sfmmu_check_page_sizes(sfmmup, 1);
14488		} else {
14489			sfmmu_check_page_sizes(sfmmup, 0);
14490		}
14491	}
14492
14493	if (r_type == SFMMU_REGION_HME) {
14494		sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14495	}
14496
14497	r_obj = rgnp->rgn_obj;
14498	if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) {
14499		return;
14500	}
14501
	/*
	 * Looks like nobody uses this region anymore.  Free it.
	 */
14505	rhash = RGN_HASH_FUNCTION(r_obj);
14506	mutex_enter(&srdp->srd_mutex);
14507	for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14508	    (cur_rgnp = *prev_rgnpp) != NULL;
14509	    prev_rgnpp = &cur_rgnp->rgn_hash) {
14510		if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14511			break;
14512		}
14513	}
14514
14515	if (cur_rgnp == NULL) {
14516		mutex_exit(&srdp->srd_mutex);
14517		return;
14518	}
14519
14520	ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14521	*prev_rgnpp = rgnp->rgn_hash;
14522	if (r_type == SFMMU_REGION_ISM) {
14523		rgnp->rgn_flags |= SFMMU_REGION_FREE;
14524		ASSERT(rid < srdp->srd_next_ismrid);
14525		rgnp->rgn_next = srdp->srd_ismrgnfree;
14526		srdp->srd_ismrgnfree = rgnp;
14527		ASSERT(srdp->srd_ismbusyrgns > 0);
14528		srdp->srd_ismbusyrgns--;
14529		mutex_exit(&srdp->srd_mutex);
14530		return;
14531	}
14532	mutex_exit(&srdp->srd_mutex);
14533
14534	/*
14535	 * Destroy region's hmeblks.
14536	 */
14537	sfmmu_unload_hmeregion(srdp, rgnp);
14538
14539	rgnp->rgn_hmeflags = 0;
14540
14541	ASSERT(rgnp->rgn_sfmmu_head == NULL);
14542	ASSERT(rgnp->rgn_id == rid);
14543	for (i = 0; i < MMU_PAGE_SIZES; i++) {
14544		rgnp->rgn_ttecnt[i] = 0;
14545	}
14546	rgnp->rgn_flags |= SFMMU_REGION_FREE;
14547	mutex_enter(&srdp->srd_mutex);
14548	ASSERT(rid < srdp->srd_next_hmerid);
14549	rgnp->rgn_next = srdp->srd_hmergnfree;
14550	srdp->srd_hmergnfree = rgnp;
14551	ASSERT(srdp->srd_hmebusyrgns > 0);
14552	srdp->srd_hmebusyrgns--;
14553	mutex_exit(&srdp->srd_mutex);
14554}
14555
14556/*
14557 * For now only called for hmeblk regions and not for ISM regions.
14558 */
14559void
14560hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14561{
14562	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14563	uint_t rid = (uint_t)((uint64_t)rcookie);
14564	sf_region_t *rgnp;
14565	sf_rgn_link_t *rlink;
14566	sf_rgn_link_t *hrlink;
14567	ulong_t	rttecnt;
14568
14569	ASSERT(sfmmup != ksfmmup);
14570	ASSERT(srdp != NULL);
14571	ASSERT(srdp->srd_refcnt > 0);
14572
14573	ASSERT(rid < srdp->srd_next_hmerid);
14574	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14575	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14576
14577	rgnp = srdp->srd_hmergnp[rid];
14578	ASSERT(rgnp->rgn_refcnt > 0);
14579	ASSERT(rgnp->rgn_id == rid);
14580	ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14581	ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14582
14583	atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);
14584
14585	/* LINTED: constant in conditional context */
14586	SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14587	ASSERT(rlink != NULL);
14588	mutex_enter(&rgnp->rgn_mutex);
14589	ASSERT(rgnp->rgn_sfmmu_head != NULL);
14590	/* LINTED: constant in conditional context */
14591	SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14592	ASSERT(hrlink != NULL);
14593	ASSERT(hrlink->prev == NULL);
14594	rlink->next = rgnp->rgn_sfmmu_head;
14595	rlink->prev = NULL;
14596	hrlink->prev = sfmmup;
14597	/*
14598	 * make sure rlink's next field is correct
14599	 * before making this link visible.
14600	 */
14601	membar_stst();
14602	rgnp->rgn_sfmmu_head = sfmmup;
14603	mutex_exit(&rgnp->rgn_mutex);
14604
14605	/* update sfmmu_ttecnt with the shme rgn ttecnt */
14606	rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14607	atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14608	/* update tsb0 inflation count */
14609	if (rgnp->rgn_pgszc >= TTE4M) {
14610		sfmmup->sfmmu_tsb0_4minflcnt +=
14611		    rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14612	}
14613	/*
14614	 * Update regionid bitmask without hat lock since no other thread
14615	 * can update this region bitmask right now.
14616	 */
14617	SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14618}
14619
14620/* ARGSUSED */
14621static int
14622sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14623{
14624	sf_region_t *rgnp = (sf_region_t *)buf;
14625	bzero(buf, sizeof (*rgnp));
14626
14627	mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14628
14629	return (0);
14630}
14631
14632/* ARGSUSED */
14633static void
14634sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14635{
14636	sf_region_t *rgnp = (sf_region_t *)buf;
14637	mutex_destroy(&rgnp->rgn_mutex);
14638}
14639
14640static int
14641sfrgnmap_isnull(sf_region_map_t *map)
14642{
14643	int i;
14644
14645	for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14646		if (map->bitmap[i] != 0) {
14647			return (0);
14648		}
14649	}
14650	return (1);
14651}
14652
14653static int
14654sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14655{
14656	int i;
14657
14658	for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14659		if (map->bitmap[i] != 0) {
14660			return (0);
14661		}
14662	}
14663	return (1);
14664}
14665
14666#ifdef DEBUG
14667static void
14668check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14669{
14670	sfmmu_t *sp;
14671	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14672
14673	for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14674		ASSERT(srdp == sp->sfmmu_srdp);
14675		if (sp == sfmmup) {
14676			if (onlist) {
14677				return;
14678			} else {
				panic("shctx: sfmmu 0x%p found on scd "
				    "list 0x%p", (void *)sfmmup,
				    (void *)*headp);
14682			}
14683		}
14684	}
14685	if (onlist) {
14686		panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14687		    (void *)sfmmup, (void *)*headp);
14688	} else {
14689		return;
14690	}
14691}
14692#else /* DEBUG */
14693#define	check_scd_sfmmu_list(headp, sfmmup, onlist)
14694#endif /* DEBUG */
14695
14696/*
14697 * Removes an sfmmu from the SCD sfmmu list.
14698 */
14699static void
14700sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14701{
14702	ASSERT(sfmmup->sfmmu_srdp != NULL);
14703	check_scd_sfmmu_list(headp, sfmmup, 1);
14704	if (sfmmup->sfmmu_scd_link.prev != NULL) {
14705		ASSERT(*headp != sfmmup);
14706		sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14707		    sfmmup->sfmmu_scd_link.next;
14708	} else {
14709		ASSERT(*headp == sfmmup);
14710		*headp = sfmmup->sfmmu_scd_link.next;
14711	}
14712	if (sfmmup->sfmmu_scd_link.next != NULL) {
14713		sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14714		    sfmmup->sfmmu_scd_link.prev;
14715	}
14716}
14717
14718
14719/*
14720 * Adds an sfmmu to the start of the queue.
14721 */
14722static void
14723sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14724{
14725	check_scd_sfmmu_list(headp, sfmmup, 0);
14726	sfmmup->sfmmu_scd_link.prev = NULL;
14727	sfmmup->sfmmu_scd_link.next = *headp;
14728	if (*headp != NULL)
14729		(*headp)->sfmmu_scd_link.prev = sfmmup;
14730	*headp = sfmmup;
14731}

/*
 * Remove an scd from the queue.
 */
static void
sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
{
	if (scdp->scd_prev != NULL) {
		ASSERT(*headp != scdp);
		scdp->scd_prev->scd_next = scdp->scd_next;
	} else {
		ASSERT(*headp == scdp);
		*headp = scdp->scd_next;
	}

	if (scdp->scd_next != NULL) {
		scdp->scd_next->scd_prev = scdp->scd_prev;
	}
}

/*
 * Add an scd to the start of the queue.
 */
static void
sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
{
	scdp->scd_prev = NULL;
	scdp->scd_next = *headp;
	if (*headp != NULL) {
		(*headp)->scd_prev = scdp;
	}
	*headp = scdp;
}

static int
sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
{
	uint_t rid;
	uint_t i;
	uint_t j;
	ulong_t w;
	sf_region_t *rgnp;
	ulong_t tte8k_cnt = 0;
	ulong_t tte4m_cnt = 0;
	uint_t tsb_szc;
	sfmmu_t *scsfmmup = scdp->scd_sfmmup;
	sfmmu_t	*ism_hatid;
	struct tsb_info *newtsb;
	int szc;

	ASSERT(srdp != NULL);

	for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
		if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
			continue;
		}
		j = 0;
		while (w) {
			if (!(w & 0x1)) {
				j++;
				w >>= 1;
				continue;
			}
			rid = (i << BT_ULSHIFT) | j;
			j++;
			w >>= 1;

			if (rid < SFMMU_MAX_HME_REGIONS) {
				rgnp = srdp->srd_hmergnp[rid];
				ASSERT(rgnp->rgn_id == rid);
				ASSERT(rgnp->rgn_refcnt > 0);

				if (rgnp->rgn_pgszc < TTE4M) {
					tte8k_cnt += rgnp->rgn_size >>
					    TTE_PAGE_SHIFT(TTE8K);
				} else {
					ASSERT(rgnp->rgn_pgszc >= TTE4M);
					tte4m_cnt += rgnp->rgn_size >>
					    TTE_PAGE_SHIFT(TTE4M);
					/*
					 * Inflate SCD tsb0 by preallocating
					 * 1/4 8k ttecnt for 4M regions to
					 * allow for lgpg alloc failure.
					 */
					tte8k_cnt += rgnp->rgn_size >>
					    (TTE_PAGE_SHIFT(TTE8K) + 2);
				}
			} else {
				rid -= SFMMU_MAX_HME_REGIONS;
				rgnp = srdp->srd_ismrgnp[rid];
				ASSERT(rgnp->rgn_id == rid);
				ASSERT(rgnp->rgn_refcnt > 0);

				ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
				ASSERT(ism_hatid->sfmmu_ismhat);

				for (szc = 0; szc < TTE4M; szc++) {
					tte8k_cnt +=
					    ism_hatid->sfmmu_ttecnt[szc] <<
					    TTE_BSZS_SHIFT(szc);
				}

				ASSERT(rgnp->rgn_pgszc >= TTE4M);
				if (rgnp->rgn_pgszc >= TTE4M) {
					tte4m_cnt += rgnp->rgn_size >>
					    TTE_PAGE_SHIFT(TTE4M);
				}
			}
		}
	}

	tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);

	/* Allocate both the SCD TSBs here. */
	if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
	    tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
	    (tsb_szc <= TSB_4M_SZCODE ||
	    sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
	    TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
	    TSB_ALLOC, scsfmmup))) {

		SFMMU_STAT(sf_scd_1sttsb_allocfail);
		return (TSB_ALLOCFAIL);
	} else {
		scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;

		if (tte4m_cnt) {
			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
			if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
			    TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
			    (tsb_szc <= TSB_4M_SZCODE ||
			    sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
			    TSB4M|TSB32M|TSB256M,
			    TSB_ALLOC, scsfmmup))) {
				/*
				 * If we fail to allocate the 2nd shared tsb,
				 * just free the 1st tsb and return failure.
				 */
				sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
				SFMMU_STAT(sf_scd_2ndtsb_allocfail);
				return (TSB_ALLOCFAIL);
			} else {
				ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
				newtsb->tsb_flags |= TSB_SHAREDCTX;
				scsfmmup->sfmmu_tsb->tsb_next = newtsb;
				SFMMU_STAT(sf_scd_2ndtsb_alloc);
			}
		}
		SFMMU_STAT(sf_scd_1sttsb_alloc);
	}
	return (TSB_SUCCESS);
}
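
/*
 * Worked example for the sizing loop above (illustrative only, not
 * from the original source, and assuming the usual sun4 page shifts
 * of 13 for TTE8K and 22 for TTE4M): a single 256MB HME region backed
 * by 4M pages adds
 *	tte4m_cnt += 0x10000000 >> TTE_PAGE_SHIFT(TTE4M)	 =   64
 *	tte8k_cnt += 0x10000000 >> (TTE_PAGE_SHIFT(TTE8K) + 2)	 = 8192
 * i.e. tsb0 is inflated by a quarter of the region's full 8K ttecnt
 * (32768) so it can absorb the region if large page allocation fails,
 * exactly as the in-loop comment describes.
 */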

static void
sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
{
	while (scd_sfmmu->sfmmu_tsb != NULL) {
		struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
		sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
		scd_sfmmu->sfmmu_tsb = next;
	}
}

/*
 * Link the sfmmu onto the hme region list.
 */
void
sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
{
	uint_t rid;
	sf_rgn_link_t *rlink;
	sfmmu_t *head;
	sf_rgn_link_t *hrlink;

	rid = rgnp->rgn_id;
	ASSERT(SFMMU_IS_SHMERID_VALID(rid));

	/* LINTED: constant in conditional context */
	SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
	ASSERT(rlink != NULL);
	mutex_enter(&rgnp->rgn_mutex);
	if ((head = rgnp->rgn_sfmmu_head) == NULL) {
		rlink->next = NULL;
		rlink->prev = NULL;
		/*
		 * make sure rlink's next field is NULL
		 * before making this link visible.
		 */
		membar_stst();
		rgnp->rgn_sfmmu_head = sfmmup;
	} else {
		/* LINTED: constant in conditional context */
		SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
		ASSERT(hrlink != NULL);
		ASSERT(hrlink->prev == NULL);
		rlink->next = head;
		rlink->prev = NULL;
		hrlink->prev = sfmmup;
		/*
		 * make sure rlink's next field is correct
		 * before making this link visible.
		 */
		membar_stst();
		rgnp->rgn_sfmmu_head = sfmmup;
	}
	mutex_exit(&rgnp->rgn_mutex);
}
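
/*
 * Sketch of the reader that the store ordering above protects (an
 * assumed shape, not the actual traversal code): a walker such as
 * sfmmu_rgntlb_demap() follows the chain without taking rgn_mutex,
 *
 *	for (hat = rgnp->rgn_sfmmu_head; hat != NULL; hat = next) {
 *		SFMMU_HMERID2RLINKP(hat, rid, rlink, 0, 0);
 *		next = rlink->next;
 *		...
 *	}
 *
 * so the membar_stst() must make rlink's fields stable before the new
 * entry becomes reachable through rgn_sfmmu_head.
 */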

/*
 * Unlink the sfmmu from the hme region list.
 */
void
sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
{
	uint_t rid;
	sf_rgn_link_t *rlink;

	rid = rgnp->rgn_id;
	ASSERT(SFMMU_IS_SHMERID_VALID(rid));

	/* LINTED: constant in conditional context */
	SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
	ASSERT(rlink != NULL);
	mutex_enter(&rgnp->rgn_mutex);
	if (rgnp->rgn_sfmmu_head == sfmmup) {
		sfmmu_t *next = rlink->next;
		rgnp->rgn_sfmmu_head = next;
		/*
		 * if we are stopped by xc_attention() after this
		 * point the forward link walking in
		 * sfmmu_rgntlb_demap() will work correctly since the
		 * head correctly points to the next element.
		 */
		membar_stst();
		rlink->next = NULL;
		ASSERT(rlink->prev == NULL);
		if (next != NULL) {
			sf_rgn_link_t *nrlink;
			/* LINTED: constant in conditional context */
			SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
			ASSERT(nrlink != NULL);
			ASSERT(nrlink->prev == sfmmup);
			nrlink->prev = NULL;
		}
	} else {
		sfmmu_t *next = rlink->next;
		sfmmu_t *prev = rlink->prev;
		sf_rgn_link_t *prlink;

		ASSERT(prev != NULL);
		/* LINTED: constant in conditional context */
		SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
		ASSERT(prlink != NULL);
		ASSERT(prlink->next == sfmmup);
		prlink->next = next;
		/*
		 * if we are stopped by xc_attention()
		 * after this point the forward link walking
		 * will work correctly since the prev element
		 * correctly points to the next element.
		 */
		membar_stst();
		rlink->next = NULL;
		rlink->prev = NULL;
		if (next != NULL) {
			sf_rgn_link_t *nrlink;
			/* LINTED: constant in conditional context */
			SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
			ASSERT(nrlink != NULL);
			ASSERT(nrlink->prev == sfmmup);
			nrlink->prev = prev;
		}
	}
	mutex_exit(&rgnp->rgn_mutex);
}

/*
 * Link scd sfmmu onto ism or hme region list for each region in the
 * scd region map.
 */
void
sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
{
	uint_t rid;
	uint_t i;
	uint_t j;
	ulong_t w;
	sf_region_t *rgnp;
	sfmmu_t *scsfmmup;

	scsfmmup = scdp->scd_sfmmup;
	ASSERT(scsfmmup->sfmmu_scdhat);
	for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
		if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
			continue;
		}
		j = 0;
		while (w) {
			if (!(w & 0x1)) {
				j++;
				w >>= 1;
				continue;
			}
			rid = (i << BT_ULSHIFT) | j;
			j++;
			w >>= 1;

			if (rid < SFMMU_MAX_HME_REGIONS) {
				rgnp = srdp->srd_hmergnp[rid];
				ASSERT(rgnp->rgn_id == rid);
				ASSERT(rgnp->rgn_refcnt > 0);
				sfmmu_link_to_hmeregion(scsfmmup, rgnp);
			} else {
				sfmmu_t *ism_hatid = NULL;
				ism_ment_t *ism_ment;
				rid -= SFMMU_MAX_HME_REGIONS;
				rgnp = srdp->srd_ismrgnp[rid];
				ASSERT(rgnp->rgn_id == rid);
				ASSERT(rgnp->rgn_refcnt > 0);

				ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
				ASSERT(ism_hatid->sfmmu_ismhat);
				ism_ment = &scdp->scd_ism_links[rid];
				ism_ment->iment_hat = scsfmmup;
				ism_ment->iment_base_va = rgnp->rgn_saddr;
				mutex_enter(&ism_mlist_lock);
				iment_add(ism_ment, ism_hatid);
				mutex_exit(&ism_mlist_lock);
			}
		}
	}
}
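
/*
 * For ISM regions the iment_add() above attaches the SCD hat to the
 * ISM hat's mapping entry list, presumably so that operations which
 * walk an ISM segment's iment list (such as TLB demap cross-calls)
 * also reach the shared-context hat.  No region bitmap update is
 * needed here; scd_region_map was fixed when the SCD was created.
 */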

/*
 * Unlink scd sfmmu from ism or hme region list for each region in the
 * scd region map.
 */
void
sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
{
	uint_t rid;
	uint_t i;
	uint_t j;
	ulong_t w;
	sf_region_t *rgnp;
	sfmmu_t *scsfmmup;

	scsfmmup = scdp->scd_sfmmup;
	for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
		if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
			continue;
		}
		j = 0;
		while (w) {
			if (!(w & 0x1)) {
				j++;
				w >>= 1;
				continue;
			}
			rid = (i << BT_ULSHIFT) | j;
			j++;
			w >>= 1;

			if (rid < SFMMU_MAX_HME_REGIONS) {
				rgnp = srdp->srd_hmergnp[rid];
				ASSERT(rgnp->rgn_id == rid);
				ASSERT(rgnp->rgn_refcnt > 0);
				sfmmu_unlink_from_hmeregion(scsfmmup,
				    rgnp);
			} else {
				sfmmu_t *ism_hatid = NULL;
				ism_ment_t *ism_ment;
				rid -= SFMMU_MAX_HME_REGIONS;
				rgnp = srdp->srd_ismrgnp[rid];
				ASSERT(rgnp->rgn_id == rid);
				ASSERT(rgnp->rgn_refcnt > 0);

				ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
				ASSERT(ism_hatid->sfmmu_ismhat);
				ism_ment = &scdp->scd_ism_links[rid];
				ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
				ASSERT(ism_ment->iment_base_va ==
				    rgnp->rgn_saddr);
				mutex_enter(&ism_mlist_lock);
				iment_sub(ism_ment, ism_hatid);
				mutex_exit(&ism_mlist_lock);
			}
		}
	}
}

/*
 * Allocates and initialises a new SCD structure.  It is called with
 * the srd_scd_mutex held and returns with the reference count
 * initialised to 1.
 */
static sf_scd_t *
sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
{
	sf_scd_t *new_scdp;
	sfmmu_t *scsfmmup;
	int i;

	ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
	new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);

	scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
	new_scdp->scd_sfmmup = scsfmmup;
	scsfmmup->sfmmu_srdp = srdp;
	scsfmmup->sfmmu_scdp = new_scdp;
	scsfmmup->sfmmu_tsb0_4minflcnt = 0;
	scsfmmup->sfmmu_scdhat = 1;
	CPUSET_ALL(scsfmmup->sfmmu_cpusran);
	bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);

	ASSERT(max_mmu_ctxdoms > 0);
	for (i = 0; i < max_mmu_ctxdoms; i++) {
		scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
		scsfmmup->sfmmu_ctxs[i].gnum = 0;
	}

	for (i = 0; i < MMU_PAGE_SIZES; i++) {
		new_scdp->scd_rttecnt[i] = 0;
	}

	new_scdp->scd_region_map = *new_map;
	new_scdp->scd_refcnt = 1;
	if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
		kmem_cache_free(scd_cache, new_scdp);
		kmem_cache_free(sfmmuid_cache, scsfmmup);
		return (NULL);
	}
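	/*
	 * mmu_init_scd is an optional platform hook; taking the
	 * function's address below is evidently a weak-symbol presence
	 * test, so the call is made only on platforms that provide it.
	 */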
	if (&mmu_init_scd) {
		mmu_init_scd(new_scdp);
	}
	return (new_scdp);
}

/*
 * The first phase of a process joining an SCD. The hat structure is
 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
 * and a cross-call with context invalidation is used to cause the
 * remaining work to be carried out in the sfmmu_tsbmiss_exception()
 * routine.
 */
static void
sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
{
	hatlock_t *hatlockp;
	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
	int i;
	sf_scd_t *old_scdp;

	ASSERT(srdp != NULL);
	ASSERT(scdp != NULL);
	ASSERT(scdp->scd_refcnt > 0);
	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));

	if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
		ASSERT(old_scdp != scdp);

		mutex_enter(&old_scdp->scd_mutex);
		sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
		mutex_exit(&old_scdp->scd_mutex);
		/*
		 * sfmmup leaves the old scd. Update sfmmu_ttecnt to
		 * include the shme rgn ttecnt for rgns that
		 * were in the old SCD.
		 */
		for (i = 0; i < mmu_page_sizes; i++) {
			ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
			    old_scdp->scd_rttecnt[i]);
			atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
			    sfmmup->sfmmu_scdrttecnt[i]);
		}
	}

	/*
	 * Move sfmmu to the scd lists.
	 */
	mutex_enter(&scdp->scd_mutex);
	sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
	mutex_exit(&scdp->scd_mutex);
	SF_SCD_INCR_REF(scdp);

	hatlockp = sfmmu_hat_enter(sfmmup);
	/*
	 * For a multi-thread process, we must stop
	 * all the other threads before joining the scd.
	 */

	SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);

	sfmmu_invalidate_ctx(sfmmup);
	sfmmup->sfmmu_scdp = scdp;

	/*
	 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
	 * sfmmu_ttecnt so that it no longer includes the rgn ttecnt for
	 * the regions that have just joined the SCD.
	 */
	for (i = 0; i < mmu_page_sizes; i++) {
		sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
		ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
		atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
		    -sfmmup->sfmmu_scdrttecnt[i]);
	}
	/* update tsb0 inflation count */
	if (old_scdp != NULL) {
		sfmmup->sfmmu_tsb0_4minflcnt +=
		    old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
	}
	ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
	    scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
	sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;

	sfmmu_hat_exit(hatlockp);

	if (old_scdp != NULL) {
		SF_SCD_DECR_REF(srdp, old_scdp);
	}
}

/*
 * This routine is called by a process to become part of an SCD. It is called
 * from sfmmu_tsbmiss_exception() once most of the initial work has been
 * done by sfmmu_join_scd(). This routine must not drop the hat lock.
 */
static void
sfmmu_finish_join_scd(sfmmu_t *sfmmup)
{
	struct tsb_info	*tsbinfop;

	ASSERT(sfmmu_hat_lock_held(sfmmup));
	ASSERT(sfmmup->sfmmu_scdp != NULL);
	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
	ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));

	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
	    tsbinfop = tsbinfop->tsb_next) {
		if (tsbinfop->tsb_flags & TSB_SWAPPED) {
			continue;
		}
		ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));

		sfmmu_inv_tsb(tsbinfop->tsb_va,
		    TSB_BYTES(tsbinfop->tsb_szc));
	}

	/* Set HAT_CTX1_FLAG for all SCD ISMs */
	sfmmu_ism_hatflags(sfmmup, 1);

	SFMMU_STAT(sf_join_scd);
}

/*
 * This routine is called in order to check if there is an SCD which matches
 * the process's region map; if not, a new SCD may be created.
 */
static void
sfmmu_find_scd(sfmmu_t *sfmmup)
{
	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
	sf_scd_t *scdp, *new_scdp;
	int ret;

	ASSERT(srdp != NULL);
	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));

	mutex_enter(&srdp->srd_scd_mutex);
	for (scdp = srdp->srd_scdp; scdp != NULL;
	    scdp = scdp->scd_next) {
		SF_RGNMAP_EQUAL(&scdp->scd_region_map,
		    &sfmmup->sfmmu_region_map, ret);
		if (ret == 1) {
			SF_SCD_INCR_REF(scdp);
			mutex_exit(&srdp->srd_scd_mutex);
			sfmmu_join_scd(scdp, sfmmup);
			ASSERT(scdp->scd_refcnt >= 2);
			atomic_add_32((volatile uint32_t *)
			    &scdp->scd_refcnt, -1);
			return;
		} else {
			/*
			 * If the sfmmu region map is a subset of the scd
			 * region map, then the assumption is that this process
			 * will continue attaching to ISM segments until the
			 * region maps are equal.
			 */
			SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
			    &sfmmup->sfmmu_region_map, ret);
			if (ret == 1) {
				mutex_exit(&srdp->srd_scd_mutex);
				return;
			}
		}
	}

	ASSERT(scdp == NULL);
	/*
	 * No matching SCD has been found, create a new one.
	 */
	if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
	    NULL) {
		mutex_exit(&srdp->srd_scd_mutex);
		return;
	}

	/*
	 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
	 */

	/* Set scd_rttecnt for shme rgns in SCD */
	sfmmu_set_scd_rttecnt(srdp, new_scdp);

	/*
	 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
	 */
	sfmmu_link_scd_to_regions(srdp, new_scdp);
	sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
	SFMMU_STAT_ADD(sf_create_scd, 1);

	mutex_exit(&srdp->srd_scd_mutex);
	sfmmu_join_scd(new_scdp, sfmmup);
	ASSERT(new_scdp->scd_refcnt >= 2);
	atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1);
}
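
/*
 * Subset example for the search loop above (illustrative only): if an
 * existing SCD maps regions {1, 5, 9} and this process currently maps
 * only {1, 5}, SF_RGNMAP_IS_SUBSET succeeds and we return without
 * joining or creating anything, on the assumption that the process
 * will keep attaching segments until the two maps compare equal.
 */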

/*
 * This routine is called by a process to remove itself from an SCD. It is
 * either called when the process has detached from a segment or from
 * hat_free_start() as a result of calling exit.
 */
static void
sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
{
	sf_scd_t *scdp = sfmmup->sfmmu_scdp;
	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
	hatlock_t *hatlockp = TSB_HASH(sfmmup);
	int i;

	ASSERT(scdp != NULL);
	ASSERT(srdp != NULL);

	if (sfmmup->sfmmu_free) {
		/*
		 * If the process is part of an SCD the sfmmu is unlinked
		 * from scd_sf_list.
		 */
		mutex_enter(&scdp->scd_mutex);
		sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
		mutex_exit(&scdp->scd_mutex);
		/*
		 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
		 * are about to leave the SCD
		 */
		for (i = 0; i < mmu_page_sizes; i++) {
			ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
			    scdp->scd_rttecnt[i]);
			atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
			    sfmmup->sfmmu_scdrttecnt[i]);
			sfmmup->sfmmu_scdrttecnt[i] = 0;
		}
		sfmmup->sfmmu_scdp = NULL;

		SF_SCD_DECR_REF(srdp, scdp);
		return;
	}

	ASSERT(r_type != SFMMU_REGION_ISM ||
	    SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
	ASSERT(scdp->scd_refcnt);
	ASSERT(!sfmmup->sfmmu_free);
	ASSERT(sfmmu_hat_lock_held(sfmmup));
	ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));

	/*
	 * Wait for ISM maps to be updated.
	 */
	if (r_type != SFMMU_REGION_ISM) {
		while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
		    sfmmup->sfmmu_scdp != NULL) {
			cv_wait(&sfmmup->sfmmu_tsb_cv,
			    HATLOCK_MUTEXP(hatlockp));
		}

		if (sfmmup->sfmmu_scdp == NULL) {
			sfmmu_hat_exit(hatlockp);
			return;
		}
		SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
	}

	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
		SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
		/*
		 * Since HAT_JOIN_SCD was set our context
		 * is still invalid.
		 */
	} else {
		/*
		 * For a multi-thread process, we must stop
		 * all the other threads before leaving the scd.
		 */

		sfmmu_invalidate_ctx(sfmmup);
	}

	/* Clear all the rids for ISM, delete flags, etc. */
	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
	sfmmu_ism_hatflags(sfmmup, 0);

	/*
	 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
	 * are in SCD before this sfmmup leaves the SCD.
	 */
	for (i = 0; i < mmu_page_sizes; i++) {
		ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
		    scdp->scd_rttecnt[i]);
		atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
		    sfmmup->sfmmu_scdrttecnt[i]);
		sfmmup->sfmmu_scdrttecnt[i] = 0;
		/* update ismttecnt to include SCD ism before hat leaves SCD */
		sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
		sfmmup->sfmmu_scdismttecnt[i] = 0;
	}
	/* update tsb0 inflation count */
	sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;

	if (r_type != SFMMU_REGION_ISM) {
		SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
	}
	sfmmup->sfmmu_scdp = NULL;

	sfmmu_hat_exit(hatlockp);

	/*
	 * Unlink sfmmu from scd_sf_list. This can be done without holding
	 * the hat lock as we hold the sfmmu_as lock, which prevents
	 * hat_join_region() from adding this thread to the scd again. Other
	 * threads check if sfmmu_scdp is NULL under the hat lock and if it's
	 * NULL they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
	 * while holding the hat lock.
	 */
	mutex_enter(&scdp->scd_mutex);
	sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
	mutex_exit(&scdp->scd_mutex);
	SFMMU_STAT(sf_leave_scd);

	SF_SCD_DECR_REF(srdp, scdp);
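	/*
	 * The hat lock is re-acquired before returning, apparently so
	 * that the non-free path exits with the same lock state it was
	 * entered with (note the sfmmu_hat_lock_held() assertion above).
	 */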
	hatlockp = sfmmu_hat_enter(sfmmup);
}

/*
 * Unlink and free up an SCD structure with a reference count of 0.
 */
static void
sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
{
	sfmmu_t *scsfmmup;
	sf_scd_t *sp;
	hatlock_t *shatlockp;
	int i, ret;

	mutex_enter(&srdp->srd_scd_mutex);
	for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
		if (sp == scdp)
			break;
	}
	if (sp == NULL || sp->scd_refcnt) {
		mutex_exit(&srdp->srd_scd_mutex);
		return;
	}

	/*
	 * It is possible that the scd has been freed and reallocated with a
	 * different region map while we've been waiting for the srd_scd_mutex.
	 */
	SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
	if (ret != 1) {
		mutex_exit(&srdp->srd_scd_mutex);
		return;
	}

	ASSERT(scdp->scd_sf_list == NULL);
	/*
	 * Unlink scd from srd_scdp list.
	 */
	sfmmu_remove_scd(&srdp->srd_scdp, scdp);
	mutex_exit(&srdp->srd_scd_mutex);

	sfmmu_unlink_scd_from_regions(srdp, scdp);

	/* Clear shared context tsb and release ctx */
	scsfmmup = scdp->scd_sfmmup;

	/*
	 * Create a barrier so that the scd will not be destroyed while
	 * another thread still holds the same shared hat lock; e.g.,
	 * sfmmu_tsbmiss_exception() needs to acquire the shared hat lock
	 * before checking the shared tsb reloc flag.
	 */
	shatlockp = sfmmu_hat_enter(scsfmmup);
	sfmmu_hat_exit(shatlockp);

	sfmmu_free_scd_tsbs(scsfmmup);

	for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
		if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
			kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
			    SFMMU_L2_HMERLINKS_SIZE);
			scsfmmup->sfmmu_hmeregion_links[i] = NULL;
		}
	}
	kmem_cache_free(sfmmuid_cache, scsfmmup);
	kmem_cache_free(scd_cache, scdp);
	SFMMU_STAT(sf_destroy_scd);
}

/*
 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond
 * to bits set in the SCD's ISM region map (scd_ismregion_map). This flag
 * indicates to the tsbmiss handler that mapping for these segments should
 * be loaded using the shared context.
 */
static void
sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
{
	sf_scd_t *scdp = sfmmup->sfmmu_scdp;
	ism_blk_t *ism_blkp;
	ism_map_t *ism_map;
	int i, rid;

	ASSERT(sfmmup->sfmmu_iblk != NULL);
	ASSERT(scdp != NULL);
	/*
	 * Note that the caller either set HAT_ISMBUSY flag or checked
	 * under hat lock that HAT_ISMBUSY was not set by another thread.
	 */
	ASSERT(sfmmu_hat_lock_held(sfmmup));

	ism_blkp = sfmmup->sfmmu_iblk;
	while (ism_blkp != NULL) {
		ism_map = ism_blkp->iblk_maps;
		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
			rid = ism_map[i].imap_rid;
			if (rid == SFMMU_INVALID_ISMRID) {
				continue;
			}
			ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
			if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
			    addflag) {
				ism_map[i].imap_hatflags |=
				    HAT_CTX1_FLAG;
			} else {
				ism_map[i].imap_hatflags &=
				    ~HAT_CTX1_FLAG;
			}
		}
		ism_blkp = ism_blkp->iblk_next;
	}
}

static int
sfmmu_srd_lock_held(sf_srd_t *srdp)
{
	return (MUTEX_HELD(&srdp->srd_mutex));
}

/* ARGSUSED */
static int
sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
{
	sf_scd_t *scdp = (sf_scd_t *)buf;

	bzero(buf, sizeof (sf_scd_t));
	mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

/* ARGSUSED */
static void
sfmmu_scdcache_destructor(void *buf, void *cdrarg)
{
	sf_scd_t *scdp = (sf_scd_t *)buf;

	mutex_destroy(&scdp->scd_mutex);
}

/*
 * The listp parameter is a pointer to a list of hmeblks which are partially
 * freed as a result of calling sfmmu_hblk_hash_rm(); the last phase of the
 * freeing process is to cross-call all cpus to ensure that there are no
 * remaining cached references.
 *
 * If the local generation number is less than the global then we can free
 * hmeblks which are already on the pending queue as another cpu has completed
 * the cross-call.
 *
 * We cross-call to make sure that there are no threads on other cpus accessing
 * these hmeblks and then complete the process of freeing them under the
 * following conditions:
 *	The total number of pending hmeblks is greater than the threshold
 *	The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
 *	It is at least 1 second since the last time we cross-called
 *
 * Otherwise, we add the hmeblks to the per-cpu pending queue.
 */
static void
sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
{
	struct hme_blk *hblkp, *pr_hblkp = NULL;
	int		count = 0;
	cpuset_t	cpuset = cpu_ready_set;
	cpu_hme_pend_t	*cpuhp;
	timestruc_t	now;
	int		one_second_expired = 0;

	gethrestime_lasttick(&now);

	for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
		ASSERT(hblkp->hblk_shw_bit == 0);
		ASSERT(hblkp->hblk_shared == 0);
		count++;
		pr_hblkp = hblkp;
	}

	cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
	mutex_enter(&cpuhp->chp_mutex);

	if ((cpuhp->chp_count + count) == 0) {
		mutex_exit(&cpuhp->chp_mutex);
		return;
	}

	if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
		one_second_expired = 1;
	}

	if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
	    (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
	    one_second_expired)) {
		/* Append global list to local */
		if (pr_hblkp == NULL) {
			*listp = cpuhp->chp_listp;
		} else {
			pr_hblkp->hblk_next = cpuhp->chp_listp;
		}
		cpuhp->chp_listp = NULL;
		cpuhp->chp_count = 0;
		cpuhp->chp_timestamp = now.tv_sec;
		mutex_exit(&cpuhp->chp_mutex);

		kpreempt_disable();
		CPUSET_DEL(cpuset, CPU->cpu_id);
		xt_sync(cpuset);
		xt_sync(cpuset);
		kpreempt_enable();

		/*
		 * At this stage we know that no trap handlers on other
		 * cpus can have references to hmeblks on the list.
		 */
		sfmmu_hblk_free(listp);
	} else if (*listp != NULL) {
		pr_hblkp->hblk_next = cpuhp->chp_listp;
		cpuhp->chp_listp = *listp;
		cpuhp->chp_count += count;
		*listp = NULL;
		mutex_exit(&cpuhp->chp_mutex);
	} else {
		mutex_exit(&cpuhp->chp_mutex);
	}
}
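
/*
 * Illustrative flow (not from the original source): an unload that
 * ends with, say, 12 partially freed hmeblks either drains the per-cpu
 * queue and frees everything after the xt_sync() pair (when the queue
 * has exceeded cpu_hme_pend_thresh, the reserve pool is low, or more
 * than a second has passed since the last flush), or simply parks the
 * 12 hmeblks on the queue, amortizing the cross-call cost over later
 * calls.
 */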

/*
 * Add an hmeblk to the hash list.
 */
void
sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa)
{
	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
#ifdef	DEBUG
	if (hmebp->hmeblkp == NULL) {
		ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
	}
#endif /* DEBUG */

	hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
	/*
	 * Since the TSB miss handler now does not lock the hash chain before
	 * walking it, make sure that the hmeblk's nextpa is globally visible
	 * before we make the hmeblk itself visible by updating the chain root
	 * pointer in the hash bucket.
	 */
	membar_producer();
	hmebp->hmeh_nextpa = hblkpa;
	hmeblkp->hblk_next = hmebp->hmeblkp;
	hmebp->hmeblkp = hmeblkp;
}
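
/*
 * Sketch of the lock-free reader this ordering serves (an assumed
 * shape, not the actual trap handler, which runs in assembly): the
 * TSB miss path walks the bucket by physical address only,
 *
 *	pa = hmebp->hmeh_nextpa;
 *	while (pa != HMEBLK_ENDPA) {
 *		... probe the hmeblk at pa ...
 *		pa = that hmeblk's hblk_nextpa;
 *	}
 *
 * so with the membar_producer() above it can never fetch the new
 * bucket head before that hmeblk's own hblk_nextpa is valid.
 */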

/*
 * This function is the first part of a two-part process to remove an hmeblk
 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
 * but leave the next physical pointer unchanged. The hmeblk is then linked onto
 * a per-cpu pending list using the virtual address pointer.
 *
 * TSB miss trap handlers that start after this phase will no longer see
 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
 * can still use it for further chain traversal because we haven't yet modified
 * the next physical pointer or freed it.
 *
 * In the second phase of hmeblk removal we'll issue a barrier xcall before
 * we reuse or free this hmeblk. This will make sure all lingering references to
 * the hmeblk after first phase disappear before we finally reclaim it.
 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
 * during their traversal.
 *
 * The hmehash_mutex must be held when calling this function.
 *
 * Input:
 *	 hmebp - hme hash bucket pointer
 *	 hmeblkp - address of hmeblk to be removed
 *	 pr_hblk - virtual address of previous hmeblkp
 *	 listp - pointer to list of hmeblks linked by virtual address
 *	 free_now flag - indicates that a complete removal from the hash chains
 *			 is necessary.
 *
 * It is inefficient to use the free_now flag as a cross-call is required to
 * remove a single hmeblk from the hash chain but is necessary when hmeblks are
 * in short supply.
 */
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
    struct hme_blk *pr_hblk, struct hme_blk **listp,
    int free_now)
{
	int shw_size, vshift;
	struct hme_blk *shw_hblkp;
	uint_t		shw_mask, newshw_mask;
	caddr_t		vaddr;
	int		size;
	cpuset_t cpuset = cpu_ready_set;

	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));

	if (hmebp->hmeblkp == hmeblkp) {
		hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
		hmebp->hmeblkp = hmeblkp->hblk_next;
	} else {
		pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
		pr_hblk->hblk_next = hmeblkp->hblk_next;
	}

	size = get_hblk_ttesz(hmeblkp);
	shw_hblkp = hmeblkp->hblk_shadow;
	if (shw_hblkp) {
		ASSERT(hblktosfmmu(hmeblkp) != KHATID);
		ASSERT(!hmeblkp->hblk_shared);
#ifdef	DEBUG
		if (mmu_page_sizes == max_mmu_page_sizes) {
			ASSERT(size < TTE256M);
		} else {
			ASSERT(size < TTE4M);
		}
#endif /* DEBUG */

		shw_size = get_hblk_ttesz(shw_hblkp);
		vaddr = (caddr_t)get_hblk_base(hmeblkp);
		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
		ASSERT(vshift < 8);
		/*
		 * Atomically clear shadow mask bit
		 */
		do {
			shw_mask = shw_hblkp->hblk_shw_mask;
			ASSERT(shw_mask & (1 << vshift));
			newshw_mask = shw_mask & ~(1 << vshift);
			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
			    shw_mask, newshw_mask);
		} while (newshw_mask != shw_mask);
		hmeblkp->hblk_shadow = NULL;
	}
	hmeblkp->hblk_shw_bit = 0;

	if (hmeblkp->hblk_shared) {
#ifdef	DEBUG
		sf_srd_t	*srdp;
		sf_region_t	*rgnp;
		uint_t		rid;

		srdp = hblktosrd(hmeblkp);
		ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
		rid = hmeblkp->hblk_tag.htag_rid;
		ASSERT(SFMMU_IS_SHMERID_VALID(rid));
		ASSERT(rid < SFMMU_MAX_HME_REGIONS);
		rgnp = srdp->srd_hmergnp[rid];
		ASSERT(rgnp != NULL);
		SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
#endif /* DEBUG */
		hmeblkp->hblk_shared = 0;
	}
	if (free_now) {
		kpreempt_disable();
		CPUSET_DEL(cpuset, CPU->cpu_id);
		xt_sync(cpuset);
		xt_sync(cpuset);
		kpreempt_enable();

		hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
		hmeblkp->hblk_next = NULL;
	} else {
		/* Append hmeblkp to listp for processing later. */
		hmeblkp->hblk_next = *listp;
		*listp = hmeblkp;
	}
}
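
/*
 * The shadow-mask update above is the usual lock-free read-modify-write
 * retry: reread hblk_shw_mask, compute the value with our bit cleared,
 * and let cas32() install it only if no other cpu changed the mask in
 * between.  cas32() returns the previous value of the target, so the
 * loop exits exactly when that matches the value we read, i.e. when
 * the swap succeeded.
 */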

/*
 * This routine is called when memory is in short supply and returns a free
 * hmeblk of the requested size from the cpu pending lists.
 */
static struct hme_blk *
sfmmu_check_pending_hblks(int size)
{
	int i;
	struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
	int found_hmeblk;
	cpuset_t cpuset = cpu_ready_set;
	cpu_hme_pend_t *cpuhp;

	/* Flush cpu hblk pending queues */
	for (i = 0; i < NCPU; i++) {
		cpuhp = &cpu_hme_pend[i];
		if (cpuhp->chp_listp != NULL) {
			mutex_enter(&cpuhp->chp_mutex);
			if (cpuhp->chp_listp == NULL) {
				mutex_exit(&cpuhp->chp_mutex);
				continue;
			}
			found_hmeblk = 0;
			last_hmeblkp = NULL;
			for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
			    hmeblkp = hmeblkp->hblk_next) {
				if (get_hblk_ttesz(hmeblkp) == size) {
					if (last_hmeblkp == NULL) {
						cpuhp->chp_listp =
						    hmeblkp->hblk_next;
					} else {
						last_hmeblkp->hblk_next =
						    hmeblkp->hblk_next;
					}
					ASSERT(cpuhp->chp_count > 0);
					cpuhp->chp_count--;
					found_hmeblk = 1;
					break;
				} else {
					last_hmeblkp = hmeblkp;
				}
			}
			mutex_exit(&cpuhp->chp_mutex);

			if (found_hmeblk) {
				kpreempt_disable();
				CPUSET_DEL(cpuset, CPU->cpu_id);
				xt_sync(cpuset);
				xt_sync(cpuset);
				kpreempt_enable();
				return (hmeblkp);
			}
		}
	}
	return (NULL);
}