1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#include	<nxge_impl.h>
28#include	<npi_mac.h>
29#include	<npi_rxdma.h>
30#include	<nxge_hio.h>
31
32#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
33static int	nxge_herr2kerr(uint64_t);
34static uint64_t nxge_init_hv_fzc_lp_op(p_nxge_t, uint64_t,
35    uint64_t, uint64_t, uint64_t, uint64_t);
36#endif
37
38static nxge_status_t nxge_init_fzc_rdc_pages(p_nxge_t,
39    uint16_t, dma_log_page_t *, dma_log_page_t *);
40
41static nxge_status_t nxge_init_fzc_tdc_pages(p_nxge_t,
42    uint16_t, dma_log_page_t *, dma_log_page_t *);
43
44/*
45 * The following interfaces are controlled by the
46 * function control registers. Some global registers
47 * are to be initialized by only one of the 2/4 functions.
48 * Use the test and set register.
49 */
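/*
 * nxge_test_and_set
 *
 *	Access the shared device function state register with
 *	test-and-set semantics (npi_dev_func_sr_sr_get_set_clear()),
 *	so that a global initialization step is performed by only
 *	one of the 2/4 functions.
 */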
50/*ARGSUSED*/
51nxge_status_t
52nxge_test_and_set(p_nxge_t nxgep, uint8_t tas)
53{
54	npi_handle_t		handle;
55	npi_status_t		rs = NPI_SUCCESS;
56
57	handle = NXGE_DEV_NPI_HANDLE(nxgep);
58	if ((rs = npi_dev_func_sr_sr_get_set_clear(handle, tas))
59	    != NPI_SUCCESS) {
60		return (NXGE_ERROR | rs);
61	}
62
63	return (NXGE_OK);
64}
65
66nxge_status_t
67nxge_set_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t mpc)
68{
69	npi_handle_t		handle;
70	npi_status_t		rs = NPI_SUCCESS;
71
72	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_set_fzc_multi_part_ctl"));
73
74	/*
75	 * In multi-partitioning, the partition manager
76	 * that owns function zero should set this multi-partition
77	 * control bit.
78	 */
79	if (nxgep->use_partition && nxgep->function_num) {
80		return (NXGE_ERROR);
81	}
82
83	handle = NXGE_DEV_NPI_HANDLE(nxgep);
84	if ((rs = npi_fzc_mpc_set(handle, mpc)) != NPI_SUCCESS) {
85		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
86		    "<== nxge_set_fzc_multi_part_ctl"));
87		return (NXGE_ERROR | rs);
88	}
89
90	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_set_fzc_multi_part_ctl"));
91
92	return (NXGE_OK);
93}
94
95nxge_status_t
96nxge_get_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t *mpc_p)
97{
98	npi_handle_t		handle;
99	npi_status_t		rs = NPI_SUCCESS;
100
101	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_get_fzc_multi_part_ctl"));
102
103	handle = NXGE_DEV_NPI_HANDLE(nxgep);
104	if ((rs = npi_fzc_mpc_get(handle, mpc_p)) != NPI_SUCCESS) {
105		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
106		    "<== nxge_get_fzc_multi_part_ctl"));
107		return (NXGE_ERROR | rs);
108	}
109	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
110
111	return (NXGE_OK);
112}
113
114/*
115 * System interrupt registers that are under function zero
116 * management.
117 */
118nxge_status_t
119nxge_fzc_intr_init(p_nxge_t nxgep)
120{
121	nxge_status_t	status = NXGE_OK;
122
123	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_init"));
124
125	/* Configure the initial timer resolution */
126	if ((status = nxge_fzc_intr_tmres_set(nxgep)) != NXGE_OK) {
127		return (status);
128	}
129
130	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
131		/*
132		 * Set up the logical device group's logical devices that
133		 * the group owns.
134		 */
135		if ((status = nxge_fzc_intr_ldg_num_set(nxgep)) != NXGE_OK)
136			goto fzc_intr_init_exit;
137
138		/* Configure the system interrupt data */
139		if ((status = nxge_fzc_intr_sid_set(nxgep)) != NXGE_OK)
140			goto fzc_intr_init_exit;
141	}
142
143fzc_intr_init_exit:
144
145	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_init"));
146
147	return (status);
148}
149
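/*
 * nxge_fzc_intr_ldg_num_set
 *
 *	For every logical device owned by this instance, program
 *	the logical device group it has been assigned to
 *	(npi_fzc_ldg_num_set()).
 */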
150nxge_status_t
151nxge_fzc_intr_ldg_num_set(p_nxge_t nxgep)
152{
153	p_nxge_ldg_t	ldgp;
154	p_nxge_ldv_t	ldvp;
155	npi_handle_t	handle;
156	int		i, j;
157	npi_status_t	rs = NPI_SUCCESS;
158
159	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_ldg_num_set"));
160
161	if (nxgep->ldgvp == NULL) {
162		return (NXGE_ERROR);
163	}
164
165	ldgp = nxgep->ldgvp->ldgp;
166	ldvp = nxgep->ldgvp->ldvp;
167	if (ldgp == NULL || ldvp == NULL) {
168		return (NXGE_ERROR);
169	}
170
171	handle = NXGE_DEV_NPI_HANDLE(nxgep);
172
173	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
174		NXGE_DEBUG_MSG((nxgep, INT_CTL,
175		    "==> nxge_fzc_intr_ldg_num_set "
176		    "(Neptune): # ldv %d "
177		    "in group %d", ldgp->nldvs, ldgp->ldg));
178
179		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
180			rs = npi_fzc_ldg_num_set(handle, ldvp->ldv,
181			    ldvp->ldg_assigned);
182			if (rs != NPI_SUCCESS) {
183				NXGE_DEBUG_MSG((nxgep, INT_CTL,
184				    "<== nxge_fzc_intr_ldg_num_set failed "
185				    " rs 0x%x ldv %d ldg %d",
186				    rs, ldvp->ldv, ldvp->ldg_assigned));
187				return (NXGE_ERROR | rs);
188			}
189			NXGE_DEBUG_MSG((nxgep, INT_CTL,
190			    "<== nxge_fzc_intr_ldg_num_set OK "
191			    " ldv %d ldg %d",
192			    ldvp->ldv, ldvp->ldg_assigned));
193		}
194	}
195
196	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_ldg_num_set"));
197
198	return (NXGE_OK);
199}
200
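/*
 * nxge_fzc_intr_tmres_set
 *
 *	Program the logical device group timer resolution
 *	(ldgvp->tmres) via npi_fzc_ldg_timer_res_set().
 */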
201nxge_status_t
202nxge_fzc_intr_tmres_set(p_nxge_t nxgep)
203{
204	npi_handle_t	handle;
205	npi_status_t	rs = NPI_SUCCESS;
206
207	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_tmres_set"));
208	if (nxgep->ldgvp == NULL) {
209		return (NXGE_ERROR);
210	}
211	handle = NXGE_DEV_NPI_HANDLE(nxgep);
212	if ((rs = npi_fzc_ldg_timer_res_set(handle, nxgep->ldgvp->tmres))) {
213		return (NXGE_ERROR | rs);
214	}
215	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_tmres_set"));
216
217	return (NXGE_OK);
218}
219
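/*
 * nxge_fzc_intr_sid_set
 *
 *	For each logical device group, program the system interrupt
 *	data (function number and interrupt vector) via
 *	npi_fzc_sid_set().
 */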
220nxge_status_t
221nxge_fzc_intr_sid_set(p_nxge_t nxgep)
222{
223	npi_handle_t	handle;
224	p_nxge_ldg_t	ldgp;
225	fzc_sid_t	sid;
226	int		i;
227	npi_status_t	rs = NPI_SUCCESS;
228
229	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_sid_set"));
230	if (nxgep->ldgvp == NULL) {
231		NXGE_DEBUG_MSG((nxgep, INT_CTL,
232		    "<== nxge_fzc_intr_sid_set: no ldg"));
233		return (NXGE_ERROR);
234	}
235	handle = NXGE_DEV_NPI_HANDLE(nxgep);
236	ldgp = nxgep->ldgvp->ldgp;
237	NXGE_DEBUG_MSG((nxgep, INT_CTL,
238	    "==> nxge_fzc_intr_sid_set: #int %d", nxgep->ldgvp->ldg_intrs));
239	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
240		sid.ldg = ldgp->ldg;
241		sid.niu = B_FALSE;
242		sid.func = ldgp->func;
243		sid.vector = ldgp->vector;
244		NXGE_DEBUG_MSG((nxgep, INT_CTL,
245		    "==> nxge_fzc_intr_sid_set(%d): func %d group %d "
246		    "vector %d",
247		    i, sid.func, sid.ldg, sid.vector));
248		rs = npi_fzc_sid_set(handle, sid);
249		if (rs != NPI_SUCCESS) {
250			NXGE_DEBUG_MSG((nxgep, INT_CTL,
251			    "<== nxge_fzc_intr_sid_set:failed 0x%x",
252			    rs));
253			return (NXGE_ERROR | rs);
254		}
255	}
256
257	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_sid_set"));
258
259	return (NXGE_OK);
260
261}
262
263/*
264 * nxge_init_fzc_rdc
265 *
266 *	Initialize all of an RDC's FZC_DMC registers.
267 *	This is executed by the service domain, on behalf of a
268 *	guest domain, which cannot access these registers.
269 *
270 * Arguments:
271 * 	nxgep
272 * 	channel		The channel to initialize.
273 *
274 * NPI_NXGE function calls:
275 *	nxge_init_fzc_rdc_pages()
276 *
277 * Context:
278 *	Service Domain
279 */
280/*ARGSUSED*/
281nxge_status_t
282nxge_init_fzc_rdc(p_nxge_t nxgep, uint16_t channel)
283{
284	nxge_status_t	status = NXGE_OK;
285
286	dma_log_page_t	page1, page2;
287	npi_handle_t	handle;
288	rdc_red_para_t	red;
289
290	/*
291	 * Initialize the RxDMA channel-specific FZC control
292	 * registers.
293	 */
294
295	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rdc"));
296
297	handle = NXGE_DEV_NPI_HANDLE(nxgep);
298
299	/* Reset RXDMA channel */
300	status = npi_rxdma_cfg_rdc_reset(handle, channel);
301	if (status != NPI_SUCCESS) {
302		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
303		    "==> nxge_init_fzc_rdc: npi_rxdma_cfg_rdc_reset(%d) "
304		    "returned 0x%08x", channel, status));
305		return (NXGE_ERROR | status);
306	}
307
308	/*
309	 * These values have been copied from
310	 * nxge_txdma.c:nxge_map_txdma_channel_cfg_ring().
311	 */
312	page1.page_num = 0;
313	page1.valid = 1;
314	page1.func_num = nxgep->function_num;
315	page1.mask = 0;
316	page1.value = 0;
317	page1.reloc = 0;
318
319	page2.page_num = 1;
320	page2.valid = 1;
321	page2.func_num = nxgep->function_num;
322	page2.mask = 0;
323	page2.value = 0;
324	page2.reloc = 0;
325
326	if (nxgep->niu_type == N2_NIU) {
327#if !defined(NIU_HV_WORKAROUND)
328		status = NXGE_OK;
329#else
330		NXGE_DEBUG_MSG((nxgep, RX_CTL,
331		    "==> nxge_init_fzc_rdc: N2_NIU - NEED to "
332		    "set up logical pages"));
333		/* Initialize the RXDMA logical pages */
334		status = nxge_init_fzc_rdc_pages(nxgep, channel,
335		    &page1, &page2);
336		if (status != NXGE_OK) {
337			return (status);
338		}
339#endif
340	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
341		/* Initialize the RXDMA logical pages */
342		status = nxge_init_fzc_rdc_pages(nxgep, channel,
343		    &page1, &page2);
344		if (status != NXGE_OK) {
345			return (status);
346		}
347	} else {
348		return (NXGE_ERROR);
349	}
350
351	/*
352	 * Configure RED parameters
353	 */
354	red.value = 0;
355	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
356	red.bits.ldw.thre =
357	    (nxgep->nxge_port_rcr_size - RXDMA_RED_LESS_ENTRIES);
358	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
359	red.bits.ldw.thre_sync =
360	    (nxgep->nxge_port_rcr_size - RXDMA_RED_LESS_ENTRIES);
361
362	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
363	    "==> nxge_init_fzc_rdc: RED (thre_sync %d(%x))",
364	    red.bits.ldw.thre_sync,
365	    red.bits.ldw.thre_sync));
366
367	status |= npi_rxdma_cfg_wred_param(handle, channel, &red);
368
369	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_rdc"));
370
371	return (status);
372}
373
374/*
375 * nxge_init_fzc_rxdma_channel
376 *
377 *	Initialize all per-channel FZC_DMC registers.
378 *
379 * Arguments:
380 * 	nxgep
381 * 	channel		The channel to start
382 *
383 * NPI_NXGE function calls:
384 *	nxge_init_hv_fzc_rxdma_channel_pages()
385 *	nxge_init_fzc_rxdma_channel_pages()
386 *	nxge_init_fzc_rxdma_channel_red()
387 *
388 * Context:
389 *	Service Domain
390 */
391/*ARGSUSED*/
392nxge_status_t
393nxge_init_fzc_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
394{
395	rx_rbr_ring_t		*rbr_ring;
396	rx_rcr_ring_t		*rcr_ring;
397
398	nxge_status_t		status = NXGE_OK;
399
400	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_init_fzc_rxdma_channel"));
401
402	rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
403	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
404
405	if (nxgep->niu_type == N2_NIU) {
406#ifndef	NIU_HV_WORKAROUND
407#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
408		NXGE_DEBUG_MSG((nxgep, RX_CTL,
409		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - call HV "
410		    "set up logical pages"));
411		/* Initialize the RXDMA logical pages */
412		status = nxge_init_hv_fzc_rxdma_channel_pages(nxgep, channel,
413		    rbr_ring);
414		if (status != NXGE_OK) {
415			return (status);
416		}
417#endif
418		status = NXGE_OK;
419#else
420		NXGE_DEBUG_MSG((nxgep, RX_CTL,
421		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
422		    "set up logical pages"));
423		/* Initialize the RXDMA logical pages */
424		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
425		    rbr_ring);
426		if (status != NXGE_OK) {
427			return (status);
428		}
429#endif
430	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
431		/* Initialize the RXDMA logical pages */
432		status = nxge_init_fzc_rxdma_channel_pages(nxgep,
433		    channel, rbr_ring);
434		if (status != NXGE_OK) {
435			return (status);
436		}
437	} else {
438		return (NXGE_ERROR);
439	}
440
441	/* Configure RED parameters */
442	status = nxge_init_fzc_rxdma_channel_red(nxgep, channel, rcr_ring);
443
444	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_init_fzc_rxdma_channel"));
445	return (status);
446}
447
448/*
449 * nxge_init_fzc_rdc_pages
450 *
451 *	Configure an RDC's logical pages.
452 *
453 *	This function is executed by the service domain, on behalf of
454 *	a guest domain, to whom this RDC has been loaned.
455 *
456 * Arguments:
457 * 	nxgep
458 * 	channel		The channel to initialize.
459 * 	page0		Logical page 0 definition.
460 * 	page1		Logical page 1 definition.
461 *
462 * Notes:
463 *	I think that this function can be called from any
464 *	domain, but I need to check.
465 *
466 * NPI/NXGE function calls:
467 *	npi_rxdma_cfg_logical_page()
468 *	npi_rxdma_cfg_logical_page_handle()
469 *
470 * Context:
471 *	Any domain
472 */
473nxge_status_t
474nxge_init_fzc_rdc_pages(
475	p_nxge_t nxgep,
476	uint16_t channel,
477	dma_log_page_t *page0,
478	dma_log_page_t *page1)
479{
480	npi_handle_t handle;
481	npi_status_t rs;
482
483	uint64_t page_handle;
484
485	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
486	    "==> nxge_init_fzc_rdc_pages"));
487
488#ifndef	NIU_HV_WORKAROUND
489	if (nxgep->niu_type == N2_NIU) {
490		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
491		    "<== nxge_init_fzc_rdc_pages: "
492		    "N2_NIU: no need to set rxdma logical pages"));
493		return (NXGE_OK);
494	}
495#else
496	if (nxgep->niu_type == N2_NIU) {
497		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
498		    "<== nxge_init_fzc_rdc_pages: "
499		    "N2_NIU: NEED to set rxdma logical pages"));
500	}
501#endif
502
503	/*
504	 * Initialize logical page 0.
505	 */
506	handle = NXGE_DEV_NPI_HANDLE(nxgep);
507	if ((rs = npi_rxdma_cfg_logical_page(handle, channel, page0))
508	    != NPI_SUCCESS)
509		return (NXGE_ERROR | rs);
510
511	/*
512	 * Initialize logical page 1.
513	 */
514	if ((rs = npi_rxdma_cfg_logical_page(handle, channel, page1))
515	    != NPI_SUCCESS)
516		return (NXGE_ERROR | rs);
517
518	/*
519	 * Initialize the page handle.
520	 * (In the current driver, this is always set to 0.)
521	 */
522	page_handle = 0;
523	rs = npi_rxdma_cfg_logical_page_handle(handle, channel, page_handle);
524	if (rs == NPI_SUCCESS) {
525		return (NXGE_OK);
526	} else {
527		return (NXGE_ERROR | rs);
528	}
529}
530
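/*
 * nxge_init_fzc_rxdma_channel_pages
 *
 *	Program an RDC's two logical pages and its page handle from
 *	the values saved in the RBR ring structure (page_valid,
 *	page_value_*, page_mask_*, page_reloc_* and page_hdl).
 */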
531/*ARGSUSED*/
532nxge_status_t
533nxge_init_fzc_rxdma_channel_pages(p_nxge_t nxgep,
534		uint16_t channel, p_rx_rbr_ring_t rbrp)
535{
536	npi_handle_t		handle;
537	dma_log_page_t		cfg;
538	npi_status_t		rs = NPI_SUCCESS;
539
540	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
541	    "==> nxge_init_fzc_rxdma_channel_pages"));
542
543	handle = NXGE_DEV_NPI_HANDLE(nxgep);
544	/*
545	 * Initialize logical page 0.
546	 */
547	cfg.func_num = nxgep->function_num;
548	cfg.page_num = 0;
549	cfg.valid = rbrp->page_valid.bits.ldw.page0;
550	cfg.value = rbrp->page_value_1.value;
551	cfg.mask = rbrp->page_mask_1.value;
552	cfg.reloc = rbrp->page_reloc_1.value;
553	rs = npi_rxdma_cfg_logical_page(handle, channel,
554	    (p_dma_log_page_t)&cfg);
555	if (rs != NPI_SUCCESS) {
556		return (NXGE_ERROR | rs);
557	}
558
559	/*
560	 * Initialize logical page 1.
561	 */
562	cfg.page_num = 1;
563	cfg.valid = rbrp->page_valid.bits.ldw.page1;
564	cfg.value = rbrp->page_value_2.value;
565	cfg.mask = rbrp->page_mask_2.value;
566	cfg.reloc = rbrp->page_reloc_2.value;
567
568	rs = npi_rxdma_cfg_logical_page(handle, channel, &cfg);
569	if (rs != NPI_SUCCESS) {
570		return (NXGE_ERROR | rs);
571	}
572
573	/* Initialize the page handle */
574	rs = npi_rxdma_cfg_logical_page_handle(handle, channel,
575	    rbrp->page_hdl.bits.ldw.handle);
576
577	if (rs != NPI_SUCCESS) {
578		return (NXGE_ERROR | rs);
579	}
580
581	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
582	    "<== nxge_init_fzc_rxdma_channel_pages"));
583
584	return (NXGE_OK);
585}
586
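/*
 * nxge_init_fzc_rxdma_channel_red
 *
 *	Program the per-channel WRED parameters: the default window
 *	sizes and a drop threshold of RXDMA_RED_LESS_ENTRIES below
 *	the RCR size, presumably so that packets are discarded
 *	before the completion ring can overflow.
 */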
587/*ARGSUSED*/
588nxge_status_t
589nxge_init_fzc_rxdma_channel_red(p_nxge_t nxgep,
590	uint16_t channel, p_rx_rcr_ring_t rcr_p)
591{
592	npi_handle_t		handle;
593	rdc_red_para_t		red;
594	npi_status_t		rs = NPI_SUCCESS;
595
596	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_channel_red"));
597
598	handle = NXGE_DEV_NPI_HANDLE(nxgep);
599	red.value = 0;
600	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
601	red.bits.ldw.thre = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
602	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
603	red.bits.ldw.thre_sync = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
604
605	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
606	    "==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
607	    red.bits.ldw.thre_sync,
608	    red.bits.ldw.thre_sync));
609
610	rs = npi_rxdma_cfg_wred_param(handle, channel, &red);
611	if (rs != NPI_SUCCESS) {
612		return (NXGE_ERROR | rs);
613	}
614
615	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
616	    "<== nxge_init_fzc_rxdma_channel_red"));
617
618	return (NXGE_OK);
619}
620
621/*
622 * nxge_init_fzc_tdc
623 *
624 *	Initialize all of a TDC's FZC_DMC registers.
625 *	This is executed by the service domain, on behalf of a
626 *	guest domain, which cannot access these registers.
627 *
628 * Arguments:
629 * 	nxgep
630 * 	channel		The channel to initialize.
631 *
632 * NPI_NXGE function calls:
633 *	nxge_init_fzc_tdc_pages()
634 *	npi_txc_dma_max_burst_set()
635 *
636 * Registers accessed:
637 *	TXC_DMA_MAX_BURST
638 *
639 * Context:
640 *	Service Domain
641 */
642/*ARGSUSED*/
643nxge_status_t
644nxge_init_fzc_tdc(p_nxge_t nxgep, uint16_t channel)
645{
646	nxge_status_t	status = NXGE_OK;
647
648	dma_log_page_t	page1, page2;
649	npi_handle_t	handle;
650
651	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_tdc"));
652
653	/*
654	 * These values have been copied from
655	 * nxge_txdma.c:nxge_map_txdma_channel_cfg_ring().
656	 */
657	page1.page_num = 0;
658	page1.valid = 1;
659	page1.func_num = nxgep->function_num;
660	page1.mask = 0;
661	page1.value = 0;
662	page1.reloc = 0;
663
664	page2.page_num = 1;
665	page2.valid = 1;
666	page2.func_num = nxgep->function_num;
667	page2.mask = 0;
668	page2.value = 0;
669	page2.reloc = 0;
670
671#ifdef	NIU_HV_WORKAROUND
672	if (nxgep->niu_type == N2_NIU) {
673		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
674		    "==> nxge_init_fzc_txdma_channel "
675		    "N2_NIU: NEED to set up txdma logical pages"));
676		/* Initialize the TXDMA logical pages */
677		(void) nxge_init_fzc_tdc_pages(nxgep, channel,
678		    &page1, &page2);
679	}
680#endif
681	if (nxgep->niu_type != N2_NIU) {
682		if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
683			/* Initialize the TXDMA logical pages */
684			(void) nxge_init_fzc_tdc_pages(nxgep, channel,
685			    &page1, &page2);
686		} else
687			return (NXGE_ERROR);
688	}
689
690	/*
691	 * Configure the TXC DMA Max Burst value.
692	 *
693	 * PRM.13.5
694	 *
695	 * TXC DMA Max Burst. TXC_DMA_MAX (FZC_TXC + 0000016)
696	 * 19:0		dma_max_burst		RW
697	 * Max burst value associated with DMA. Used by DRR engine
698	 * for computing when DMA has gone into deficit.
699	 */
700	handle = NXGE_DEV_NPI_HANDLE(nxgep);
701	(void) npi_txc_dma_max_burst_set(
702	    handle, channel, TXC_DMA_MAX_BURST_DEFAULT);
703
704	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_tdc"));
705
706	return (status);
707}
708
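/*
 * nxge_init_fzc_txdma_channel
 *
 *	Initialize the per-channel FZC_DMC TX registers: the TDC
 *	logical pages (through the hypervisor on the N2/NIU, or
 *	directly on Neptune) and the TXC DMA max burst (DRR) value.
 */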
709/*ARGSUSED*/
710nxge_status_t
711nxge_init_fzc_txdma_channel(p_nxge_t nxgep, uint16_t channel,
712	p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p)
713{
714	nxge_status_t	status = NXGE_OK;
715
716	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
717	    "==> nxge_init_fzc_txdma_channel"));
718
719	if (nxgep->niu_type == N2_NIU) {
720#ifndef	NIU_HV_WORKAROUND
721#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
722		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
723		    "==> nxge_init_fzc_txdma_channel "
724		    "N2_NIU: call HV to set up txdma logical pages"));
725		status = nxge_init_hv_fzc_txdma_channel_pages(nxgep, channel,
726		    tx_ring_p);
727		if (status != NXGE_OK) {
728			return (status);
729		}
730#endif
731		status = NXGE_OK;
732#else
733		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
734		    "==> nxge_init_fzc_txdma_channel "
735		    "N2_NIU: NEED to set up txdma logical pages"));
736		/* Initialize the TXDMA logical pages */
737		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
738		    tx_ring_p);
739#endif
740	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
741		/* Initialize the TXDMA logical pages */
742		(void) nxge_init_fzc_txdma_channel_pages(nxgep,
743		    channel, tx_ring_p);
744	} else {
745		return (NXGE_ERROR);
746	}
747
748	/*
749	 * Configure Transmit DRR Weight parameters
750	 * (It actually programs the TXC max burst register).
751	 */
752	(void) nxge_init_fzc_txdma_channel_drr(nxgep, channel, tx_ring_p);
753
754	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
755	    "<== nxge_init_fzc_txdma_channel"));
756	return (status);
757}
758
759
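/*
 * nxge_init_fzc_rx_common
 *
 *	Initialize the FZC RXDMA state common to the whole device:
 *	the RXDMA clock divider, 32-bit addressing mode (i386 only),
 *	the WRED random seed, and every RDC table (group) owned by
 *	this function.
 */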
760nxge_status_t
761nxge_init_fzc_rx_common(p_nxge_t nxgep)
762{
763	npi_handle_t	handle;
764	npi_status_t	rs = NPI_SUCCESS;
765	nxge_status_t	status = NXGE_OK;
766	nxge_rdc_grp_t	*rdc_grp_p;
767	clock_t		lbolt;
768	int		table;
769
770	nxge_hw_pt_cfg_t *hardware;
771
772	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rx_common"));
773	handle = NXGE_DEV_NPI_HANDLE(nxgep);
774	if (!handle.regp) {
775		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
776		    "==> nxge_init_fzc_rx_common null ptr"));
777		return (NXGE_ERROR);
778	}
779
780	/*
781	 * Configure the RXDMA clock divider.
782	 * This sets the granularity counter, which is driven by
783	 * the hardware system clock (300 MHz), i.e. roughly
784	 * 3.3 nanoseconds per tick.
785	 * Setting the clock divider counter to 1000 gives a
786	 * granularity of a few microseconds per count.
787	 * For example, a timeout value of 1 corresponds to
788	 * roughly a 3 microsecond timeout.
789	 */
790	rs = npi_rxdma_cfg_clock_div_set(handle, RXDMA_CK_DIV_DEFAULT);
791	if (rs != NPI_SUCCESS)
792		return (NXGE_ERROR | rs);
793
794#if defined(__i386)
795	rs = npi_rxdma_cfg_32bitmode_enable(handle);
796	if (rs != NPI_SUCCESS)
797		return (NXGE_ERROR | rs);
798	rs = npi_txdma_mode32_set(handle, B_TRUE);
799	if (rs != NPI_SUCCESS)
800		return (NXGE_ERROR | rs);
801#endif
802
803	/*
804	 * Enable WRED and program an initial value.
805	 * Use time to set the initial random number.
806	 */
807	(void) drv_getparm(LBOLT, &lbolt);
808	rs = npi_rxdma_cfg_red_rand_init(handle, (uint16_t)lbolt);
809	if (rs != NPI_SUCCESS)
810		return (NXGE_ERROR | rs);
811
812	hardware = &nxgep->pt_config.hw_config;
813	for (table = 0; table < NXGE_MAX_RDC_GRPS; table++) {
814		/* Does this table belong to <nxgep>? */
815		if (hardware->grpids[table] == (nxgep->function_num + 256)) {
816			rdc_grp_p = &nxgep->pt_config.rdc_grps[table];
817			status = nxge_init_fzc_rdc_tbl(nxgep, rdc_grp_p, table);
818		}
819	}
820
821	/* Ethernet Timeout Counter (?) */
822
823	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
824	    "<== nxge_init_fzc_rx_common:status 0x%08x", status));
825
826	return (status);
827}
828
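/*
 * nxge_init_fzc_rdc_tbl
 *
 *	Program one RDC table that has already been bound to this
 *	instance: verify ownership under the HIO lock, record the
 *	group's RDC map, and write it to the hardware with
 *	npi_rxdma_rdc_table_config().
 */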
829nxge_status_t
830nxge_init_fzc_rdc_tbl(p_nxge_t nxge, nxge_rdc_grp_t *group, int rdc_tbl)
831{
832	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
833	nx_rdc_tbl_t	*table;
834	npi_handle_t	handle;
835
836	npi_status_t	rs = NPI_SUCCESS;
837	nxge_status_t	status = NXGE_OK;
838
839	NXGE_DEBUG_MSG((nxge, DMA_CTL, "==> nxge_init_fzc_rdc_tbl(%d)", rdc_tbl));
840
841	/* This RDC table must have been previously bound to <nxge>. */
842	MUTEX_ENTER(&nhd->lock);
843	table = &nhd->rdc_tbl[rdc_tbl];
844	if (table->nxge != (uintptr_t)nxge) {
845		MUTEX_EXIT(&nhd->lock);
846		NXGE_ERROR_MSG((nxge, DMA_CTL,
847		    "nxge_init_fzc_rdc_tbl(%d): not owner", rdc_tbl));
848		return (NXGE_ERROR);
849	} else {
850		table->map = group->map;
851	}
852	MUTEX_EXIT(&nhd->lock);
853
854	handle = NXGE_DEV_NPI_HANDLE(nxge);
855
856	rs = npi_rxdma_rdc_table_config(handle, rdc_tbl,
857	    group->map, group->max_rdcs);
858
859	if (rs != NPI_SUCCESS) {
860		status = NXGE_ERROR | rs;
861	}
862
863	NXGE_DEBUG_MSG((nxge, DMA_CTL, "<== nxge_init_fzc_rdc_tbl(%d)", rdc_tbl));
864	return (status);
865}
866
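/*
 * rdc_tbl_bind
 *
 *	Bind an RDC table to <nxge>.  If rdc_tbl is a valid table
 *	index, only that table is attempted; otherwise the first
 *	free table is taken.  Returns the bound table index, or
 *	-EBUSY if no table could be bound.
 */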
867static
868int
869rdc_tbl_bind(p_nxge_t nxge, int rdc_tbl)
870{
871	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
872	nx_rdc_tbl_t *table;
873	int i;
874
875	NXGE_DEBUG_MSG((nxge, DMA_CTL, "==> nxge_fzc_rdc_tbl_bind"));
876
877	MUTEX_ENTER(&nhd->lock);
878	/* is the caller asking for a particular table? */
879	if (rdc_tbl >= 0 && rdc_tbl < NXGE_MAX_RDC_GROUPS) {
880		table = &nhd->rdc_tbl[rdc_tbl];
881		if (table->nxge == 0) {
882			table->nxge = (uintptr_t)nxge; /* It is now bound. */
883			NXGE_DEBUG_MSG((nxge, DMA_CTL,
884			    "<== nxge_fzc_rdc_tbl_bind(%d)", rdc_tbl));
885			MUTEX_EXIT(&nhd->lock);
886			return (rdc_tbl);
887		}
888	} else {	/* The caller will take any old RDC table. */
889		for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
890			nx_rdc_tbl_t *table = &nhd->rdc_tbl[i];
891			if (table->nxge == 0) {
892				table->nxge = (uintptr_t)nxge;
893				/* It is now bound. */
894				MUTEX_EXIT(&nhd->lock);
895				NXGE_DEBUG_MSG((nxge, DMA_CTL,
896				    "<== nxge_fzc_rdc_tbl_bind: %d", i));
897				return (i);
898			}
899		}
900	}
901	MUTEX_EXIT(&nhd->lock);
902
903	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_fzc_rdc_tbl_bind"));
904
905	return (-EBUSY);	/* All RDC tables are already bound. */
906}
907
908int
909nxge_fzc_rdc_tbl_bind(
910	nxge_t *nxge,
911	int grp_index,
912	int acceptNoSubstitutes)
913{
914	nxge_hw_pt_cfg_t *hardware;
915	int index;
916
917	hardware = &nxge->pt_config.hw_config;
918
919	if ((index = rdc_tbl_bind(nxge, grp_index)) < 0) {
920		if (acceptNoSubstitutes)
921			return (index);
922		index = rdc_tbl_bind(nxge, -1); /* Take any free table. */
923		if (index < 0) {
924			NXGE_ERROR_MSG((nxge, OBP_CTL,
925			    "nxge_fzc_rdc_tbl_bind: "
926			    "there are no free RDC tables!"));
927			return (index);
928		}
929	}
930
931	hardware->grpids[index] = nxge->function_num + 256;
932
933	return (index);
934}
935
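/*
 * nxge_fzc_rdc_tbl_unbind
 *
 *	Release an RDC table previously bound with
 *	nxge_fzc_rdc_tbl_bind().  Returns EINVAL if this instance
 *	is not the current owner of the table.
 */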
936int
937nxge_fzc_rdc_tbl_unbind(p_nxge_t nxge, int rdc_tbl)
938{
939	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
940	nx_rdc_tbl_t *table;
941
942	if (nhd == NULL)
943		return (0);
944
945	NXGE_DEBUG_MSG((nxge, DMA_CTL, "==> nxge_fzc_rdc_tbl_unbind(%d)",
946	    rdc_tbl));
947
948	MUTEX_ENTER(&nhd->lock);
949	table = &nhd->rdc_tbl[rdc_tbl];
950	if (table->nxge != (uintptr_t)nxge) {
951		NXGE_ERROR_MSG((nxge, DMA_CTL,
952		    "nxge_fzc_rdc_tbl_unbind(%d): func%d not owner",
953		    nxge->function_num, rdc_tbl));
954		MUTEX_EXIT(&nhd->lock);
955		return (EINVAL);
956	} else {
957		bzero(table, sizeof (*table));
958	}
959	MUTEX_EXIT(&nhd->lock);
960
961	NXGE_DEBUG_MSG((nxge, DMA_CTL, "<== nxge_fzc_rdc_tbl_unbind(%d)",
962	    rdc_tbl));
963
964	return (0);
965}
966
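/*
 * nxge_init_fzc_rxdma_port
 *
 *	Initialize the per-port FZC RXDMA state: the port DRR
 *	weight (1G port modes only), the port's default RDC, and
 *	the MAC host info entries that map MAC addresses to RDC
 *	tables.
 */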
967nxge_status_t
968nxge_init_fzc_rxdma_port(p_nxge_t nxgep)
969{
970	npi_handle_t		handle;
971	p_nxge_dma_pt_cfg_t	p_all_cfgp;
972	p_nxge_hw_pt_cfg_t	p_cfgp;
973	hostinfo_t 		hostinfo;
974	int			i;
975	npi_status_t		rs = NPI_SUCCESS;
976	p_nxge_class_pt_cfg_t 	p_class_cfgp;
977	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_port"));
978
979	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
980	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
981	handle = NXGE_DEV_NPI_HANDLE(nxgep);
982	/*
983	 * Initialize the port scheduler DRR weight.
984	 * npi_rxdma_cfg_port_ddr_weight();
985	 */
986
987	if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
988	    (nxgep->mac.portmode == PORT_1G_FIBER) ||
989	    (nxgep->mac.portmode == PORT_1G_TN1010) ||
990	    (nxgep->mac.portmode == PORT_1G_SERDES)) {
991		rs = npi_rxdma_cfg_port_ddr_weight(handle,
992		    nxgep->function_num, NXGE_RX_DRR_WT_1G);
993		if (rs != NPI_SUCCESS) {
994			return (NXGE_ERROR | rs);
995		}
996	}
997
998	/* Program the default RDC of a port */
999	rs = npi_rxdma_cfg_default_port_rdc(handle, nxgep->function_num,
1000	    p_cfgp->def_rdc);
1001	if (rs != NPI_SUCCESS) {
1002		return (NXGE_ERROR | rs);
1003	}
1004
1005	/*
1006	 * Configure the MAC host info table with RDC tables
1007	 */
1008	hostinfo.value = 0;
1009	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1010	for (i = 0; i < p_cfgp->max_macs; i++) {
1011		hostinfo.bits.w0.rdc_tbl_num = p_cfgp->def_mac_rxdma_grpid;
1012		hostinfo.bits.w0.mac_pref = p_cfgp->mac_pref;
1013		if (p_class_cfgp->mac_host_info[i].flag) {
1014			hostinfo.bits.w0.rdc_tbl_num =
1015			    p_class_cfgp->mac_host_info[i].rdctbl;
1016			hostinfo.bits.w0.mac_pref =
1017			    p_class_cfgp->mac_host_info[i].mpr_npr;
1018		}
1019
1020		rs = npi_mac_hostinfo_entry(handle, OP_SET,
1021		    nxgep->function_num, i, &hostinfo);
1022		if (rs != NPI_SUCCESS)
1023			return (NXGE_ERROR | rs);
1024	}
1025
1026	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1027	    "<== nxge_init_fzc_rxdma_port rs 0x%08x", rs));
1028
1029	return (NXGE_OK);
1030
1031}
1032
1033nxge_status_t
1034nxge_fzc_dmc_def_port_rdc(p_nxge_t nxgep, uint8_t port, uint16_t rdc)
1035{
1036	npi_status_t rs = NPI_SUCCESS;
1037	rs = npi_rxdma_cfg_default_port_rdc(nxgep->npi_reg_handle,
1038	    port, rdc);
1039	if (rs & NPI_FAILURE)
1040		return (NXGE_ERROR | rs);
1041	return (NXGE_OK);
1042}
1043
1044/*
1045 * nxge_init_fzc_tdc_pages
1046 *
1047 *	Configure a TDC's logical pages.
1048 *
1049 *	This function is executed by the service domain, on behalf of
1050 *	a guest domain, to whom this TDC has been loaned.
1051 *
1052 * Arguments:
1053 * 	nxgep
1054 * 	channel		The channel to initialize.
1055 * 	page0		Logical page 0 definition.
1056 * 	page1		Logical page 1 definition.
1057 *
1058 * Notes:
1059 *	I think that this function can be called from any
1060 *	domain, but I need to check.
1061 *
1062 * NPI/NXGE function calls:
1063 *	npi_txdma_log_page_set()
1064 *	npi_txdma_log_page_handle_set()
1065 *
1066 * Context:
1067 *	Any domain
1068 */
1069nxge_status_t
1070nxge_init_fzc_tdc_pages(
1071	p_nxge_t nxgep,
1072	uint16_t channel,
1073	dma_log_page_t *page0,
1074	dma_log_page_t *page1)
1075{
1076	npi_handle_t handle;
1077	npi_status_t rs;
1078
1079	log_page_hdl_t page_handle;
1080
1081	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1082	    "==> nxge_init_fzc_tdc_pages"));
1083
1084#ifndef	NIU_HV_WORKAROUND
1085	if (nxgep->niu_type == N2_NIU) {
1086		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1087		    "<== nxge_init_fzc_tdc_pages: "
1088		    "N2_NIU: no need to set txdma logical pages"));
1089		return (NXGE_OK);
1090	}
1091#else
1092	if (nxgep->niu_type == N2_NIU) {
1093		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1094		    "<== nxge_init_fzc_tdc_pages: "
1095		    "N2_NIU: NEED to set txdma logical pages"));
1096	}
1097#endif
1098
1099	/*
1100	 * Initialize logical page 0.
1101	 */
1102	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1103	if ((rs = npi_txdma_log_page_set(handle, channel, page0))
1104	    != NPI_SUCCESS)
1105		return (NXGE_ERROR | rs);
1106
1107	/*
1108	 * Initialize logical page 1.
1109	 */
1110	if ((rs = npi_txdma_log_page_set(handle, channel, page1))
1111	    != NPI_SUCCESS)
1112		return (NXGE_ERROR | rs);
1113
1114	/*
1115	 * Initialize the page handle.
1116	 * (In the current driver, this is always set to 0.)
1117	 */
1118	page_handle.value = 0;
1119	rs = npi_txdma_log_page_handle_set(handle, channel, &page_handle);
1120	if (rs == NPI_SUCCESS) {
1121		return (NXGE_OK);
1122	} else {
1123		return (NXGE_ERROR | rs);
1124	}
1125}
1126
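/*
 * nxge_init_fzc_txdma_channel_pages
 *
 *	Program a TDC's two logical pages and its page handle from
 *	the values saved in the TX ring structure.  When
 *	NIU_HV_WORKAROUND is not defined, this is a no-op on the
 *	N2/NIU, since the hypervisor owns these registers.
 */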
1127nxge_status_t
1128nxge_init_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
1129	p_tx_ring_t tx_ring_p)
1130{
1131	npi_handle_t		handle;
1132	dma_log_page_t		cfg;
1133	npi_status_t		rs = NPI_SUCCESS;
1134
1135	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1136	    "==> nxge_init_fzc_txdma_channel_pages"));
1137
1138#ifndef	NIU_HV_WORKAROUND
1139	if (nxgep->niu_type == N2_NIU) {
1140		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1141		    "<== nxge_init_fzc_txdma_channel_pages: "
1142		    "N2_NIU: no need to set txdma logical pages"));
1143		return (NXGE_OK);
1144	}
1145#else
1146	if (nxgep->niu_type == N2_NIU) {
1147		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1148		    "<== nxge_init_fzc_txdma_channel_pages: "
1149		    "N2_NIU: NEED to set txdma logical pages"));
1150	}
1151#endif
1152
1153	/*
1154	 * Initialize logical page 0.
1155	 */
1156	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1157	cfg.func_num = nxgep->function_num;
1158	cfg.page_num = 0;
1159	cfg.valid = tx_ring_p->page_valid.bits.ldw.page0;
1160	cfg.value = tx_ring_p->page_value_1.value;
1161	cfg.mask = tx_ring_p->page_mask_1.value;
1162	cfg.reloc = tx_ring_p->page_reloc_1.value;
1163
1164	rs = npi_txdma_log_page_set(handle, channel,
1165	    (p_dma_log_page_t)&cfg);
1166	if (rs != NPI_SUCCESS) {
1167		return (NXGE_ERROR | rs);
1168	}
1169
1170	/*
1171	 * Initialize logical page 1.
1172	 */
1173	cfg.page_num = 1;
1174	cfg.valid = tx_ring_p->page_valid.bits.ldw.page1;
1175	cfg.value = tx_ring_p->page_value_2.value;
1176	cfg.mask = tx_ring_p->page_mask_2.value;
1177	cfg.reloc = tx_ring_p->page_reloc_2.value;
1178
1179	rs = npi_txdma_log_page_set(handle, channel, &cfg);
1180	if (rs != NPI_SUCCESS) {
1181		return (NXGE_ERROR | rs);
1182	}
1183
1184	/* Initialize the page handle */
1185	rs = npi_txdma_log_page_handle_set(handle, channel,
1186	    &tx_ring_p->page_hdl);
1187
1188	if (rs == NPI_SUCCESS) {
1189		return (NXGE_OK);
1190	} else {
1191		return (NXGE_ERROR | rs);
1192	}
1193}
1194
1195
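/*
 * nxge_init_fzc_txdma_channel_drr
 *
 *	Program the TXC DMA max burst (DRR weight) for this channel
 *	from tx_ring_p->max_burst.
 */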
1196nxge_status_t
1197nxge_init_fzc_txdma_channel_drr(p_nxge_t nxgep, uint16_t channel,
1198	p_tx_ring_t tx_ring_p)
1199{
1200	npi_status_t	rs = NPI_SUCCESS;
1201	npi_handle_t	handle;
1202
1203	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1204	rs = npi_txc_dma_max_burst_set(handle, channel,
1205	    tx_ring_p->max_burst.value);
1206	if (rs == NPI_SUCCESS) {
1207		return (NXGE_OK);
1208	} else {
1209		return (NXGE_ERROR | rs);
1210	}
1211}
1212
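/*
 * nxge_fzc_sys_err_mask_set
 *
 *	Program the FZC system error interrupt mask register.
 */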
1213nxge_status_t
1214nxge_fzc_sys_err_mask_set(p_nxge_t nxgep, uint64_t mask)
1215{
1216	npi_status_t	rs = NPI_SUCCESS;
1217	npi_handle_t	handle;
1218
1219	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1220	rs = npi_fzc_sys_err_mask_set(handle, mask);
1221	if (rs == NPI_SUCCESS) {
1222		return (NXGE_OK);
1223	} else {
1224		return (NXGE_ERROR | rs);
1225	}
1226}
1227
1228/*
1229 * nxge_init_hv_fzc_txdma_channel_pages
1230 *
1231 *	Configure a TDC's logical pages.
1232 *
1233 * Arguments:
1234 * 	nxgep
1235 * 	channel		The channel to initialize.
1236 * 	tx_ring_p	The transmit ring.
1237 *
1238 * Notes:
1239 *	I think that this function can be called from any
1240 *	domain, but I need to check.
1241 *
1242 * NPI/NXGE function calls:
1243 *	hv_niu_tx_logical_page_conf()
1244 *	hv_niu_tx_logical_page_info()
1245 *
1246 * Context:
1247 *	Any domain
1248 */
1249#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
1250nxge_status_t
1251nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
1252	p_tx_ring_t tx_ring_p)
1253{
1254	int			err;
1255	uint64_t		hverr;
1256#ifdef	DEBUG
1257	uint64_t		ra, size;
1258#endif
1259
1260	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1261	    "==> nxge_init_hv_fzc_txdma_channel_pages"));
1262
1263	if (tx_ring_p->hv_set) {
1264		return (NXGE_OK);
1265	}
1266
1267	/*
1268	 * Initialize logical page 0 for data buffers.
1269	 */
1270	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1271	    (uint64_t)0, N2NIU_TX_LP_CONF,
1272	    tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1273	    tx_ring_p->hv_tx_buf_ioaddr_size);
1274
1275	err = (nxge_status_t)nxge_herr2kerr(hverr);
1276	if (err != 0) {
1277		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1278		    "<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1279		    "error status 0x%x "
1280		    "(page 0 data buf) hverr 0x%llx "
1281		    "ioaddr_pp $%p "
1282		    "size 0x%llx ",
1283		    channel,
1284		    err,
1285		    hverr,
1286		    tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1287		    tx_ring_p->hv_tx_buf_ioaddr_size));
1288		return (NXGE_ERROR | err);
1289	}
1290
1291#ifdef	DEBUG
1292	ra = size = 0;
1293	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1294	    (uint64_t)0, N2NIU_TX_LP_INFO,
1295	    (uint64_t)&ra, (uint64_t)&size);
1296
1297	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1298	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1299	    "ok status 0x%x "
1300	    "(page 0 data buf) hverr 0x%llx "
1301	    "set ioaddr_pp $%p "
1302	    "set size 0x%llx "
1303	    "get ra ioaddr_pp $%p "
1304	    "get size 0x%llx ",
1305	    channel,
1306	    err,
1307	    hverr,
1308	    tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1309	    tx_ring_p->hv_tx_buf_ioaddr_size,
1310	    ra,
1311	    size));
1312#endif
1313
1314	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1315	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1316	    "(page 0 data buf) hverr 0x%llx "
1317	    "ioaddr_pp $%p "
1318	    "size 0x%llx ",
1319	    channel,
1320	    hverr,
1321	    tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1322	    tx_ring_p->hv_tx_buf_ioaddr_size));
1323
1324	/*
1325	 * Initialize logical page 1 for control buffers.
1326	 */
1327	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1328	    (uint64_t)1, N2NIU_TX_LP_CONF,
1329	    tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1330	    tx_ring_p->hv_tx_cntl_ioaddr_size);
1331
1332	err = (nxge_status_t)nxge_herr2kerr(hverr);
1333
1334	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1335	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1336	    "ok status 0x%x "
1337	    "(page 1 cntl buf) hverr 0x%llx "
1338	    "ioaddr_pp $%p "
1339	    "size 0x%llx ",
1340	    channel,
1341	    err,
1342	    hverr,
1343	    tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1344	    tx_ring_p->hv_tx_cntl_ioaddr_size));
1345
1346	if (err != 0) {
1347		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1348		    "<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1349		    "error status 0x%x "
1350		    "(page 1 cntl buf) hverr 0x%llx "
1351		    "ioaddr_pp $%p "
1352		    "size 0x%llx ",
1353		    channel,
1354		    err,
1355		    hverr,
1356		    tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1357		    tx_ring_p->hv_tx_cntl_ioaddr_size));
1358		return (NXGE_ERROR | err);
1359	}
1360
1361#ifdef	DEBUG
1362	ra = size = 0;
1363	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1364	    (uint64_t)1, N2NIU_TX_LP_INFO,
1365	    (uint64_t)&ra, (uint64_t)&size);
1366
1367	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1368	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1369	    "(page 1 cntl buf) hverr 0x%llx "
1370	    "set ioaddr_pp $%p "
1371	    "set size 0x%llx "
1372	    "get ra ioaddr_pp $%p "
1373	    "get size 0x%llx ",
1374	    channel,
1375	    hverr,
1376	    tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1377	    tx_ring_p->hv_tx_cntl_ioaddr_size,
1378	    ra,
1379	    size));
1380#endif
1381
1382	tx_ring_p->hv_set = B_TRUE;
1383
1384	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1385	    "<== nxge_init_hv_fzc_txdma_channel_pages"));
1386
1387	return (NXGE_OK);
1388}
1389
1390/*ARGSUSED*/
1391nxge_status_t
1392nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t nxgep,
1393		uint16_t channel, p_rx_rbr_ring_t rbrp)
1394{
1395	int			err;
1396	uint64_t		hverr;
1397#ifdef	DEBUG
1398	uint64_t		ra, size;
1399#endif
1400
1401	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1402	    "==> nxge_init_hv_fzc_rxdma_channel_pages"));
1403
1404	if (rbrp->hv_set) {
1405		return (NXGE_OK);
1406	}
1407
1408	/* Initialize data buffers for page 0 */
1409	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1410	    (uint64_t)0, N2NIU_RX_LP_CONF,
1411	    rbrp->hv_rx_buf_base_ioaddr_pp,
1412	    rbrp->hv_rx_buf_ioaddr_size);
1413
1414	err = (nxge_status_t)nxge_herr2kerr(hverr);
1415	if (err != 0) {
1416		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1417		    "<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1418		    "error status 0x%x "
1419		    "(page 0 data buf) hverr 0x%llx "
1420		    "ioaddr_pp $%p "
1421		    "size 0x%llx ",
1422		    channel,
1423		    err,
1424		    hverr,
1425		    rbrp->hv_rx_buf_base_ioaddr_pp,
1426		    rbrp->hv_rx_buf_ioaddr_size));
1427
1428		return (NXGE_ERROR | err);
1429	}
1430
1431#ifdef	DEBUG
1432	ra = size = 0;
1433	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1434	    (uint64_t)0, N2NIU_RX_LP_INFO,
1435	    (uint64_t)&ra, (uint64_t)&size);
1436
1437	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1438	    "==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1439	    "ok status 0x%x "
1440	    "(page 0 data buf) hverr 0x%llx "
1441	    "set databuf ioaddr_pp $%p "
1442	    "set databuf size 0x%llx "
1443	    "get databuf ra ioaddr_pp %p "
1444	    "get databuf size 0x%llx",
1445	    channel,
1446	    err,
1447	    hverr,
1448	    rbrp->hv_rx_buf_base_ioaddr_pp,
1449	    rbrp->hv_rx_buf_ioaddr_size,
1450	    ra,
1451	    size));
1452#endif
1453
1454	/* Initialize control buffers for logical page 1.  */
1455	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1456	    (uint64_t)1, N2NIU_RX_LP_CONF,
1457	    rbrp->hv_rx_cntl_base_ioaddr_pp,
1458	    rbrp->hv_rx_cntl_ioaddr_size);
1459
1460	err = (nxge_status_t)nxge_herr2kerr(hverr);
1461	if (err != 0) {
1462		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1463		    "<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1464		    "error status 0x%x "
1465		    "(page 1 cntl buf) hverr 0x%llx "
1466		    "ioaddr_pp $%p "
1467		    "size 0x%llx ",
1468		    channel,
1469		    err,
1470		    hverr,
1471		    rbrp->hv_rx_cntl_base_ioaddr_pp,
1472		    rbrp->hv_rx_cntl_ioaddr_size));
1473
1474		return (NXGE_ERROR | err);
1475	}
1476
1477#ifdef	DEBUG
1478	ra = size = 0;
1479	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
1480	    (uint64_t)1, N2NIU_RX_LP_INFO,
1481	    (uint64_t)&ra, (uint64_t)&size);
1482
1483	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1484	    "==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1485	    "ok status 0x%x "
1486	    "(page 1 cntl buf) hverr 0x%llx "
1487	    "set cntl ioaddr_pp $%p "
1488	    "set cntl size 0x%llx "
1489	    "get cntl ioaddr_pp $%p "
1490	    "get cntl size 0x%llx ",
1491	    channel,
1492	    err,
1493	    hverr,
1494	    rbrp->hv_rx_cntl_base_ioaddr_pp,
1495	    rbrp->hv_rx_cntl_ioaddr_size,
1496	    ra,
1497	    size));
1498#endif
1499
1500	rbrp->hv_set = B_TRUE;
1501
1502	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1503	    "<== nxge_init_hv_fzc_rxdma_channel_pages"));
1504
1505	return (NXGE_OK);
1506}
1507
1508/*
1509 * Map hypervisor error code to errno.  H_ENORADDR and
1510 * H_EBADALIGN are mapped to EFAULT, H_EOK maps to 0, and
1511 * any other error code is mapped to EINVAL.
1512 */
1513static int
1514nxge_herr2kerr(uint64_t hv_errcode)
1515{
1516	int	s_errcode;
1517
1518	switch (hv_errcode) {
1519	case H_ENORADDR:
1520	case H_EBADALIGN:
1521		s_errcode = EFAULT;
1522		break;
1523	case H_EOK:
1524		s_errcode = 0;
1525		break;
1526	default:
1527		s_errcode = EINVAL;
1528		break;
1529	}
1530	return (s_errcode);
1531}
1532
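/*
 * nxge_init_hv_fzc_lp_op
 *
 *	Perform a logical page hypervisor operation (configure or
 *	query, TX or RX), dispatching on the negotiated NIU
 *	hypervisor API major version.  Version 1 entry points take
 *	(channel, page, ioaddr, size); version 2 entry points also
 *	take the configuration handle (niu_cfg_hdl).  For the
 *	*_LP_INFO operations, ioaddr_pp and ioaddr_size carry
 *	pointers cast to uint64_t.  Returns the raw hypervisor
 *	error code, which callers map with nxge_herr2kerr().
 */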
1533uint64_t
1534nxge_init_hv_fzc_lp_op(p_nxge_t nxgep, uint64_t channel,
1535    uint64_t page_no, uint64_t op_type,
1536    uint64_t ioaddr_pp, uint64_t ioaddr_size)
1537{
1538	uint64_t		hverr;
1539	uint64_t		major;
1540	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1541	nxhv_dc_fp_t		*io_fp;
1542
1543	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1544	    "==> nxge_init_hv_fzc_lp_op"));
1545
1546	major = nxgep->niu_hsvc.hsvc_major;
1547	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1548	    "==> nxge_init_hv_fzc_lp_op (major %d): channel %d op_type 0x%x "
1549	    "page_no %d ioaddr_pp $%p ioaddr_size 0x%llx",
1550	    major, channel, op_type, page_no, ioaddr_pp, ioaddr_size));
1551
1552	/* Call the transmit conf function. */
1553	switch (major) {
1554	case NIU_MAJOR_VER: /* 1 */
1555		switch (op_type) {
1556		case N2NIU_TX_LP_CONF:
1557			io_fp = &nhd->hio.tx;
1558			hverr = (*io_fp->lp_conf)((uint64_t)channel,
1559			    (uint64_t)page_no,
1560			    (uint64_t)ioaddr_pp,
1561			    (uint64_t)ioaddr_size);
1562			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1563			    "==> nxge_init_hv_fzc_lp_op(tx_conf): major %d "
1564			    "op 0x%x hverr 0x%x", major, op_type, hverr));
1565			break;
1566
1567		case N2NIU_TX_LP_INFO:
1568			io_fp = &nhd->hio.tx;
1569			hverr = (*io_fp->lp_info)((uint64_t)channel,
1570			    (uint64_t)page_no,
1571			    (uint64_t *)ioaddr_pp,
1572			    (uint64_t *)ioaddr_size);
1573			break;
1574
1575		case N2NIU_RX_LP_CONF:
1576			io_fp = &nhd->hio.rx;
1577			hverr = (*io_fp->lp_conf)((uint64_t)channel,
1578			    (uint64_t)page_no,
1579			    (uint64_t)ioaddr_pp,
1580			    (uint64_t)ioaddr_size);
1581			break;
1582
1583		case N2NIU_RX_LP_INFO:
1584			io_fp = &nhd->hio.rx;
1585			hverr = (*io_fp->lp_info)((uint64_t)channel,
1586			    (uint64_t)page_no,
1587			    (uint64_t *)ioaddr_pp,
1588			    (uint64_t *)ioaddr_size);
1589			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1590			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
1591			    "op 0x%x hverr 0x%x", major, op_type, hverr));
1592			break;
1593
1594		default:
1595			hverr = EINVAL;
1596			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1597			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
1598			    "invalid op 0x%x hverr 0x%x", major,
1599			    op_type, hverr));
1600			break;
1601		}
1602
1603		break;
1604
1605	case NIU_MAJOR_VER_2: /* 2 */
1606		switch (op_type) {
1607		case N2NIU_TX_LP_CONF:
1608			io_fp = &nhd->hio.tx;
1609			hverr = (*io_fp->lp_cfgh_conf)(nxgep->niu_cfg_hdl,
1610			    (uint64_t)channel,
1611			    (uint64_t)page_no, ioaddr_pp, ioaddr_size);
1612
1613			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1614			    "==> nxge_init_hv_fzc_lp_op(tx_conf): major %d "
1615			    "op 0x%x hverr 0x%x", major, op_type, hverr));
1616			break;
1617
1618		case N2NIU_TX_LP_INFO:
1619			io_fp = &nhd->hio.tx;
1620			hverr = (*io_fp->lp_cfgh_info)(nxgep->niu_cfg_hdl,
1621			    (uint64_t)channel,
1622			    (uint64_t)page_no,
1623			    (uint64_t *)ioaddr_pp,
1624			    (uint64_t *)ioaddr_size);
1625			break;
1626
1627		case N2NIU_RX_LP_CONF:
1628			io_fp = &nhd->hio.rx;
1629			hverr = (*io_fp->lp_cfgh_conf)(nxgep->niu_cfg_hdl,
1630			    (uint64_t)channel,
1631			    (uint64_t)page_no,
1632			    (uint64_t)ioaddr_pp,
1633			    (uint64_t)ioaddr_size);
1634			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1635			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
1636			    "hverr 0x%x", major, hverr));
1637			break;
1638
1639		case N2NIU_RX_LP_INFO:
1640			io_fp = &nhd->hio.rx;
1641			hverr = (*io_fp->lp_cfgh_info)(nxgep->niu_cfg_hdl,
1642			    (uint64_t)channel,
1643			    (uint64_t)page_no,
1644			    (uint64_t *)ioaddr_pp,
1645			    (uint64_t *)ioaddr_size);
1646			break;
1647
1648		default:
1649			hverr = EINVAL;
1650			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1651			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
1652			    "invalid op 0x%x hverr 0x%x", major,
1653			    op_type, hverr));
1654			break;
1655		}
1656
1657		break;
1658
1659	default:
1660		hverr = EINVAL;
1661		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1662		    "==> nxge_init_hv_fzc_lp_op(rx_conf): invalid major %d "
1663		    "op 0x%x hverr 0x%x", major, op_type, hverr));
1664		break;
1665	}
1666
1667	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1668	    "<== nxge_init_hv_fzc_lp_op: 0x%x", hverr));
1669
1670	return (hverr);
1671}
1672
1673#endif	/* sun4v and NIU_LP_WORKAROUND */
1674