/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */

#include <sys/ddi.h>
#include <sys/archsystm.h>
#include <vm/hat_i86.h>
#include <sys/types.h>
#include <sys/cpu.h>
#include <sys/sysmacros.h>
#include <sys/immu.h>

/* invalidation queue table entry size */
#define	QINV_ENTRY_SIZE		0x10

/* max value of Queue Size field of Invalidation Queue Address Register */
#define	QINV_MAX_QUEUE_SIZE	0x7

/* status data size of invalidation wait descriptor */
#define	QINV_SYNC_DATA_SIZE	0x4

/* invalidation queue head and tail */
#define	QINV_IQA_HEAD(QH)	BITX((QH), 18, 4)
#define	QINV_IQA_TAIL_SHIFT	4
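
/*
 * The hardware queue head (IQH) and tail (IQT) registers hold the
 * offset of the current descriptor in bits 18:4, i.e. in 16-byte
 * descriptor units: an IQH value of 0x40, for example, decodes to
 * entry index 4 (byte offset 0x40 into the queue).
 */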

/* invalidation queue entry structure */
typedef struct qinv_inv_dsc {
	uint64_t	lo;
	uint64_t	hi;
} qinv_dsc_t;
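
/*
 * Each descriptor is 128 bits.  The low 4 bits of the low word hold
 * the descriptor type (extracted by INV_DSC_TYPE() below); the
 * remaining bits are type-specific fields built by the *_DSC_* macros.
 */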

/* physically contiguous pages for invalidation queue */
typedef struct qinv_mem {
	kmutex_t	   qinv_mem_lock;
	ddi_dma_handle_t   qinv_mem_dma_hdl;
	ddi_acc_handle_t   qinv_mem_acc_hdl;
	caddr_t		   qinv_mem_vaddr;
	paddr_t		   qinv_mem_paddr;
	uint_t		   qinv_mem_size;
	uint16_t	   qinv_mem_head;
	uint16_t	   qinv_mem_tail;
} qinv_mem_t;


/*
 * invalidation queue state
 *   This structure describes the state of the invalidation queue
 *   table and the related status memory for invalidation wait
 *   descriptors.
 *
 * qinv_table		- invalidation queue table
 * qinv_sync		- sync status memory for invalidation wait descriptor
 */
typedef struct qinv {
	qinv_mem_t		qinv_table;
	qinv_mem_t		qinv_sync;
} qinv_t;

static void immu_qinv_inv_wait(immu_inv_wait_t *iwp);

static struct immu_flushops immu_qinv_flushops = {
	immu_qinv_context_fsi,
	immu_qinv_context_dsi,
	immu_qinv_context_gbl,
	immu_qinv_iotlb_psi,
	immu_qinv_iotlb_dsi,
	immu_qinv_iotlb_gbl,
	immu_qinv_inv_wait
};
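
/*
 * immu_qinv_startup() installs this table in immu->immu_flushops, so
 * callers invoke these handlers through the generic flush-ops
 * indirection (see the struct immu_flushops definition in immu.h for
 * the member names) rather than calling the immu_qinv_* functions
 * directly.
 */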

/* helper macros for building queued invalidation descriptors */
#define	INV_DSC_TYPE(dsc)	((dsc)->lo & 0xF)
#define	CC_INV_DSC_HIGH		(0)
#define	CC_INV_DSC_LOW(fm, sid, did, g)	(((uint64_t)(fm) << 48) | \
	((uint64_t)(sid) << 32) | \
	((uint64_t)(did) << 16) | \
	((uint64_t)(g) << 4) | \
	1)

#define	IOTLB_INV_DSC_HIGH(addr, ih, am) (((uint64_t)(addr)) | \
	((uint64_t)(ih) << 6) | \
	((uint64_t)(am)))

#define	IOTLB_INV_DSC_LOW(did, dr, dw, g) (((uint64_t)(did) << 16) | \
	((uint64_t)(dr) << 7) | \
	((uint64_t)(dw) << 6) | \
	((uint64_t)(g) << 4) | \
	2)

#define	DEV_IOTLB_INV_DSC_HIGH(addr, s) (((uint64_t)(addr)) | (s))

#define	DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd) ( \
	((uint64_t)(sid) << 32) | \
	((uint64_t)(max_invs_pd) << 16) | \
	3)

#define	IEC_INV_DSC_HIGH (0)
#define	IEC_INV_DSC_LOW(idx, im, g) (((uint64_t)(idx) << 32) | \
	((uint64_t)(im) << 27) | \
	((uint64_t)(g) << 4) | \
	4)

#define	INV_WAIT_DSC_HIGH(saddr) ((uint64_t)(saddr))

#define	INV_WAIT_DSC_LOW(sdata, fn, sw, iflag) (((uint64_t)(sdata) << 32) | \
	((uint64_t)(fn) << 6) | \
	((uint64_t)(sw) << 5) | \
	((uint64_t)(iflag) << 4) | \
	5)
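
/*
 * In the wait descriptor, sdata is the 32-bit status value the
 * hardware writes back, fn is the fence flag (later descriptors are
 * not fetched until this one completes), sw requests a status write
 * to the address in the high word, and iflag requests a completion
 * interrupt.  qinv_wait_sync() below uses sw = 1, iflag = 0.
 */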

/*
 * QS field of the Invalidation Queue Address Register;
 * the size of the invalidation queue is 1 << (qinv_iqa_qs + 8)
 */
static uint_t qinv_iqa_qs = 6;
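
/*
 * With the default of 6 above, the queue has 1 << (6 + 8) = 16384
 * entries, i.e. 16384 * QINV_ENTRY_SIZE = 256KB of queue memory.
 */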

/*
 * the invalidation descriptor type names for the queued
 * invalidation interface
 */
static char *qinv_dsc_type[] = {
	"Reserved",
	"Context Cache Invalidate Descriptor",
	"IOTLB Invalidate Descriptor",
	"Device-IOTLB Invalidate Descriptor",
	"Interrupt Entry Cache Invalidate Descriptor",
	"Invalidation Wait Descriptor",
	"Incorrect queue invalidation type"
};

#define	QINV_MAX_DSC_TYPE	(sizeof (qinv_dsc_type) / sizeof (char *))

/*
 * the queued invalidation interface functions
 */
static void qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc);
static void qinv_context_common(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, ctt_inv_g_t type);
static void qinv_iotlb_common(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type);
static void qinv_iec_common(immu_t *immu, uint_t iidx,
    uint_t im, uint_t g);
static void qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp);
/*LINTED*/
static void qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
    uint64_t addr, uint_t size, uint_t max_invs_pd);


/* submit invalidation request descriptor to invalidation queue */
static void
qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc)
{
	qinv_t *qinv;
	qinv_mem_t *qinv_table;
	uint_t tail;
#ifdef DEBUG
	uint_t count = 0;
#endif

	qinv = (qinv_t *)immu->immu_qinv;
	qinv_table = &(qinv->qinv_table);

	mutex_enter(&qinv_table->qinv_mem_lock);
	tail = qinv_table->qinv_mem_tail;
	qinv_table->qinv_mem_tail++;

	if (qinv_table->qinv_mem_tail == qinv_table->qinv_mem_size)
		qinv_table->qinv_mem_tail = 0;

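	/*
	 * The ring is full when the bumped tail catches up with the
	 * hardware head: e.g. in a 4-entry ring with head == 2, a tail
	 * that just advanced from 1 to 2 must spin below until the
	 * hardware fetches a descriptor and moves the head forward.
	 */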
	while (qinv_table->qinv_mem_head == qinv_table->qinv_mem_tail) {
#ifdef DEBUG
		count++;
#endif
		/*
		 * the invalidation queue is full; wait for the
		 * hardware to fetch the next descriptor
		 */
		qinv_table->qinv_mem_head = QINV_IQA_HEAD(
		    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
	}

	IMMU_DPROBE3(immu__qinv__sub, uint64_t, dsc->lo, uint64_t, dsc->hi,
	    uint_t, count);

	bcopy(dsc, qinv_table->qinv_mem_vaddr + tail * QINV_ENTRY_SIZE,
	    QINV_ENTRY_SIZE);

	immu_regs_put64(immu, IMMU_REG_INVAL_QT,
	    qinv_table->qinv_mem_tail << QINV_IQA_TAIL_SHIFT);

	mutex_exit(&qinv_table->qinv_mem_lock);
}

/* queued invalidation interface -- invalidate context cache */
static void
qinv_context_common(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, ctt_inv_g_t type)
{
	qinv_dsc_t dsc;

	dsc.lo = CC_INV_DSC_LOW(function_mask, source_id, domain_id, type);
	dsc.hi = CC_INV_DSC_HIGH;

	qinv_submit_inv_dsc(immu, &dsc);
}

/* queued invalidation interface -- invalidate iotlb */
static void
qinv_iotlb_common(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type)
{
	qinv_dsc_t dsc;
	uint8_t dr = 0;
	uint8_t dw = 0;

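	/*
	 * If the hardware can drain in-flight DMA (CAP.DRD/CAP.DWD),
	 * set the descriptor's drain-read/drain-write bits so the
	 * invalidation does not complete until pending reads and
	 * writes to the invalidated mappings have drained.
	 */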
	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap))
		dr = 1;
	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap))
		dw = 1;

	switch (type) {
	case TLB_INV_G_PAGE:
		if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap) ||
		    am > IMMU_CAP_GET_MAMV(immu->immu_regs_cap) ||
		    addr & IMMU_PAGEOFFSET) {
			type = TLB_INV_G_DOMAIN;
			goto qinv_ignore_psi;
		}
		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
		dsc.hi = IOTLB_INV_DSC_HIGH(addr, hint, am);
		break;

	qinv_ignore_psi:
	case TLB_INV_G_DOMAIN:
		dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
		dsc.hi = 0;
		break;

	case TLB_INV_G_GLOBAL:
		dsc.lo = IOTLB_INV_DSC_LOW(0, dr, dw, type);
		dsc.hi = 0;
		break;
	default:
		ddi_err(DER_WARN, NULL, "incorrect iotlb flush type");
		return;
	}

	qinv_submit_inv_dsc(immu, &dsc);
}

/* queued invalidation interface -- invalidate dev_iotlb */
static void
qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
    uint64_t addr, uint_t size, uint_t max_invs_pd)
{
	qinv_dsc_t dsc;

	dsc.lo = DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd);
	dsc.hi = DEV_IOTLB_INV_DSC_HIGH(addr, size);

	qinv_submit_inv_dsc(immu, &dsc);
}

/* queued invalidation interface -- invalidate interrupt entry cache */
static void
qinv_iec_common(immu_t *immu, uint_t iidx, uint_t im, uint_t g)
{
	qinv_dsc_t dsc;

	dsc.lo = IEC_INV_DSC_LOW(iidx, im, g);
	dsc.hi = IEC_INV_DSC_HIGH;

	qinv_submit_inv_dsc(immu, &dsc);
}

/*
 * queued invalidation interface -- invalidation wait descriptor
 *   wait until the preceding invalidation requests have finished
 */
static void
qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp)
{
	qinv_dsc_t dsc;
	volatile uint32_t *status;
	uint64_t paddr;
#ifdef DEBUG
	uint_t count;
#endif

	status = &iwp->iwp_vstatus;
	paddr = iwp->iwp_pstatus;

	*status = IMMU_INV_DATA_PENDING;
	membar_producer();

	/*
	 * sdata = IMMU_INV_DATA_DONE, fence = 1, sw = 1, if = 0
	 * indicate the invalidation wait descriptor completion by
	 * performing a coherent DWORD write to the status address,
	 * not by generating an invalidation completion event
	 */
	dsc.lo = INV_WAIT_DSC_LOW(IMMU_INV_DATA_DONE, 1, 1, 0);
	dsc.hi = INV_WAIT_DSC_HIGH(paddr);

	qinv_submit_inv_dsc(immu, &dsc);

	if (iwp->iwp_sync) {
#ifdef DEBUG
		count = 0;
		while (*status != IMMU_INV_DATA_DONE) {
			count++;
			ht_pause();
		}
		DTRACE_PROBE2(immu__wait__sync, const char *, iwp->iwp_name,
		    uint_t, count);
#else
		while (*status != IMMU_INV_DATA_DONE)
			ht_pause();
#endif
	}
}

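/*
 * Wait for a previously queued invalidation wait descriptor to
 * complete.  This is the inv_wait entry of immu_qinv_flushops; it is
 * used when the flush was submitted with iwp->iwp_sync not set, so
 * qinv_wait_sync() returned without spinning and the caller
 * synchronizes here later.
 */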
static void
immu_qinv_inv_wait(immu_inv_wait_t *iwp)
{
	volatile uint32_t *status = &iwp->iwp_vstatus;
#ifdef DEBUG
	uint_t count;

	count = 0;
	while (*status != IMMU_INV_DATA_DONE) {
		count++;
		ht_pause();
	}
	DTRACE_PROBE2(immu__wait__async, const char *, iwp->iwp_name,
	    uint_t, count);
#else
	while (*status != IMMU_INV_DATA_DONE)
		ht_pause();
#endif
}

/*
 * call ddi_dma_mem_alloc to allocate physically contiguous
 * pages for the invalidation queue table
 */
static int
qinv_setup(immu_t *immu)
{
	qinv_t *qinv;
	size_t size;

	ddi_dma_attr_t qinv_dma_attr = {
		DMA_ATTR_V0,
		0U,
		0xffffffffffffffffULL,
		0xffffffffU,
		MMU_PAGESIZE, /* page aligned */
		0x1,
		0x1,
		0xffffffffU,
		0xffffffffffffffffULL,
		1,
		4,
		0
	};
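
	/*
	 * Note: dma_attr_sgllen above is 1, so the DDI must satisfy
	 * the allocation with a single cookie, forcing the memory to
	 * be physically contiguous as the hardware requires.
	 */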

	ddi_device_acc_attr_t qinv_acc_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC
	};

	mutex_init(&(immu->immu_qinv_lock), NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&(immu->immu_qinv_lock));

	immu->immu_qinv = NULL;
	if (!IMMU_ECAP_GET_QI(immu->immu_regs_excap) ||
	    immu_qinv_enable == B_FALSE) {
		mutex_exit(&(immu->immu_qinv_lock));
		return (DDI_SUCCESS);
	}

	if (qinv_iqa_qs > QINV_MAX_QUEUE_SIZE)
		qinv_iqa_qs = QINV_MAX_QUEUE_SIZE;

	qinv = kmem_zalloc(sizeof (qinv_t), KM_SLEEP);

	if (ddi_dma_alloc_handle(root_devinfo,
	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
	    &(qinv->qinv_table.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue table handle failed");
		goto queue_table_handle_failed;
	}

	if (ddi_dma_alloc_handle(root_devinfo,
	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
	    &(qinv->qinv_sync.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue sync mem handle failed");
		goto sync_table_handle_failed;
	}

	qinv->qinv_table.qinv_mem_size = (1 << (qinv_iqa_qs + 8));
	size = qinv->qinv_table.qinv_mem_size * QINV_ENTRY_SIZE;

	/* alloc physically contiguous pages for invalidation queue */
	if (ddi_dma_mem_alloc(qinv->qinv_table.qinv_mem_dma_hdl,
	    size,
	    &qinv_acc_attr,
	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(qinv->qinv_table.qinv_mem_vaddr),
	    &size,
	    &(qinv->qinv_table.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue table failed");
		goto queue_table_mem_failed;
	}

	ASSERT(!((uintptr_t)qinv->qinv_table.qinv_mem_vaddr & MMU_PAGEOFFSET));
	bzero(qinv->qinv_table.qinv_mem_vaddr, size);

	/* get the base physical address of invalidation request queue */
	qinv->qinv_table.qinv_mem_paddr = pfn_to_pa(
	    hat_getpfnum(kas.a_hat, qinv->qinv_table.qinv_mem_vaddr));

	qinv->qinv_table.qinv_mem_head = qinv->qinv_table.qinv_mem_tail = 0;

	qinv->qinv_sync.qinv_mem_size = qinv->qinv_table.qinv_mem_size;
	size = qinv->qinv_sync.qinv_mem_size * QINV_SYNC_DATA_SIZE;
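
	/*
	 * One 4-byte status slot per queue entry: with the default
	 * queue size this is 16384 * QINV_SYNC_DATA_SIZE = 64KB of
	 * wait-descriptor status memory.
	 */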

	/* alloc status memory for invalidation wait descriptors */
	if (ddi_dma_mem_alloc(qinv->qinv_sync.qinv_mem_dma_hdl,
	    size,
	    &qinv_acc_attr,
	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(qinv->qinv_sync.qinv_mem_vaddr),
	    &size,
	    &(qinv->qinv_sync.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue sync mem failed");
		goto sync_table_mem_failed;
	}

	ASSERT(!((uintptr_t)qinv->qinv_sync.qinv_mem_vaddr & MMU_PAGEOFFSET));
	bzero(qinv->qinv_sync.qinv_mem_vaddr, size);
	qinv->qinv_sync.qinv_mem_paddr = pfn_to_pa(
	    hat_getpfnum(kas.a_hat, qinv->qinv_sync.qinv_mem_vaddr));

	qinv->qinv_sync.qinv_mem_head = qinv->qinv_sync.qinv_mem_tail = 0;

	mutex_init(&(qinv->qinv_table.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
	mutex_init(&(qinv->qinv_sync.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);

	immu->immu_qinv = qinv;

	mutex_exit(&(immu->immu_qinv_lock));

	return (DDI_SUCCESS);

sync_table_mem_failed:
	ddi_dma_mem_free(&(qinv->qinv_table.qinv_mem_acc_hdl));

queue_table_mem_failed:
	ddi_dma_free_handle(&(qinv->qinv_sync.qinv_mem_dma_hdl));

sync_table_handle_failed:
	ddi_dma_free_handle(&(qinv->qinv_table.qinv_mem_dma_hdl));

queue_table_handle_failed:
	kmem_free(qinv, sizeof (qinv_t));

	mutex_exit(&(immu->immu_qinv_lock));

	return (DDI_FAILURE);
}

/*
 * ###########################################################################
 *
 * Functions exported by immu_qinv.c
 *
 * ###########################################################################
 */

/*
 * initialize the invalidation request queue structures.
 */
int
immu_qinv_setup(list_t *listp)
{
	immu_t *immu;
	int nerr;

	if (immu_qinv_enable == B_FALSE) {
		return (DDI_FAILURE);
	}

	nerr = 0;
	immu = list_head(listp);
	for (; immu; immu = list_next(listp, immu)) {
		if (qinv_setup(immu) == DDI_SUCCESS) {
			immu->immu_qinv_setup = B_TRUE;
		} else {
			nerr++;
			break;
		}
	}

	return (nerr > 0 ? DDI_FAILURE : DDI_SUCCESS);
}

void
immu_qinv_startup(immu_t *immu)
{
	qinv_t *qinv;
	uint64_t qinv_reg_value;

	if (immu->immu_qinv_setup == B_FALSE) {
		return;
	}

	qinv = (qinv_t *)immu->immu_qinv;
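	/*
	 * The queue base is page aligned, so the low bits of the
	 * address are free to carry the QS field, which the hardware
	 * takes from the low bits of the Invalidation Queue Address
	 * Register.
	 */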
	qinv_reg_value = qinv->qinv_table.qinv_mem_paddr | qinv_iqa_qs;
	immu_regs_qinv_enable(immu, qinv_reg_value);
	immu->immu_flushops = &immu_qinv_flushops;
	immu->immu_qinv_running = B_TRUE;
}

/*
 * queued invalidation interface
 *   function based context cache invalidation
 */
void
immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp)
{
	qinv_context_common(immu, function_mask, source_id,
	    domain_id, CTT_INV_G_DEVICE);
	qinv_wait_sync(immu, iwp);
}

/*
 * queued invalidation interface
 *   domain based context cache invalidation
 */
void
immu_qinv_context_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
	qinv_context_common(immu, 0, 0, domain_id, CTT_INV_G_DOMAIN);
	qinv_wait_sync(immu, iwp);
}

/*
 * queued invalidation interface
 *   global context cache invalidation
 */
void
immu_qinv_context_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
	qinv_context_common(immu, 0, 0, 0, CTT_INV_G_GLOBAL);
	qinv_wait_sync(immu, iwp);
}

/*
 * queued invalidation interface
 *   page based iotlb invalidation
 */
void
immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
	uint64_t dvma, uint_t count, uint_t hint, immu_inv_wait_t *iwp)
{
	uint_t am = 0;
	uint_t max_am;

	max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);
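
	/*
	 * Find the smallest address mask covering [dvma, dvma + count)
	 * pages; e.g. flushing 4 pages starting at page 3 needs am = 3
	 * (an 8-page aligned region), since with am = 2 page 3's
	 * offset within the 4-page region plus the count would
	 * exceed 4.
	 */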

	/* choose page-selective invalidation */
	if (IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
		while (am <= max_am) {
			if ((ADDR_AM_OFFSET(IMMU_BTOP(dvma), am) + count)
			    <= ADDR_AM_MAX(am)) {
				qinv_iotlb_common(immu, domain_id,
				    dvma, am, hint, TLB_INV_G_PAGE);
				break;
			}
			am++;
		}
		if (am > max_am) {
			qinv_iotlb_common(immu, domain_id,
			    dvma, 0, hint, TLB_INV_G_DOMAIN);
		}

	/* fall back to domain invalidation */
	} else {
		qinv_iotlb_common(immu, domain_id, dvma,
		    0, hint, TLB_INV_G_DOMAIN);
	}

	qinv_wait_sync(immu, iwp);
}

/*
 * queued invalidation interface
 *   domain based iotlb invalidation
 */
void
immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
	qinv_iotlb_common(immu, domain_id, 0, 0, 0, TLB_INV_G_DOMAIN);
	qinv_wait_sync(immu, iwp);
}

/*
 * queued invalidation interface
 *    global iotlb invalidation
 */
void
immu_qinv_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
	qinv_iotlb_common(immu, 0, 0, 0, 0, TLB_INV_G_GLOBAL);
	qinv_wait_sync(immu, iwp);
}

/* queued invalidation interface -- global invalidate interrupt entry cache */
void
immu_qinv_intr_global(immu_t *immu, immu_inv_wait_t *iwp)
{
	qinv_iec_common(immu, 0, 0, IEC_INV_GLOBAL);
	qinv_wait_sync(immu, iwp);
}

/* queued invalidation interface -- invalidate single interrupt entry cache */
void
immu_qinv_intr_one_cache(immu_t *immu, uint_t iidx, immu_inv_wait_t *iwp)
{
	qinv_iec_common(immu, iidx, 0, IEC_INV_INDEX);
	qinv_wait_sync(immu, iwp);
}

/* queued invalidation interface -- invalidate interrupt entry caches */
void
immu_qinv_intr_caches(immu_t *immu, uint_t iidx, uint_t cnt,
    immu_inv_wait_t *iwp)
{
	uint_t	i, mask = 0;

	ASSERT(cnt != 0);

	/* requested interrupt count is not a power of 2 */
	if (!ISP2(cnt)) {
		for (i = 0; i < cnt; i++) {
			qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
		}
		qinv_wait_sync(immu, iwp);
		return;
	}

	while ((2 << mask) < cnt) {
		mask++;
	}

	if (mask > IMMU_ECAP_GET_MHMV(immu->immu_regs_excap)) {
		for (i = 0; i < cnt; i++) {
			qinv_iec_common(immu, iidx + i, 0, IEC_INV_INDEX);
		}
		qinv_wait_sync(immu, iwp);
		return;
	}

	qinv_iec_common(immu, iidx, mask, IEC_INV_INDEX);

	qinv_wait_sync(immu, iwp);
}

void
immu_qinv_report_fault(immu_t *immu)
{
	uint16_t head;
	qinv_dsc_t *dsc;
	qinv_t *qinv;

	/* access qinv data */
	mutex_enter(&(immu->immu_qinv_lock));

	qinv = (qinv_t *)(immu->immu_qinv);

	head = QINV_IQA_HEAD(
	    immu_regs_get64(immu, IMMU_REG_INVAL_QH));

	dsc = (qinv_dsc_t *)(qinv->qinv_table.qinv_mem_vaddr
	    + (head * QINV_ENTRY_SIZE));

	/* report the error */
	ddi_err(DER_WARN, immu->immu_dip,
	    "generated a fault when fetching a descriptor from the"
	    " invalidation queue, or detected that the fetched"
	    " descriptor is invalid. The head register is 0x%x,"
	    " the type is %s",
	    head,
	    qinv_dsc_type[MIN(INV_DSC_TYPE(dsc),
	    QINV_MAX_DSC_TYPE - 1)]);

	mutex_exit(&(immu->immu_qinv_lock));
}