/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the status data for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
				 struct ixgbe_hw_stats *stats,
				 u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_tc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	/* Statistics pertaining to each traffic class */
	for (tc = 0; tc < tc_count; tc++) {
		/* Transmitted Packets */
		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
		/* Transmitted Bytes (read low first to prevent missed carry) */
		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
		stats->qbtc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
		/* Received Packets */
		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
		/* Received Bytes (read low first to prevent missed carry) */
		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
		stats->qbrc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

		/* Received Dropped Packets */
		stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
				  struct ixgbe_hw_stats *stats,
				  u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_pfc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	for (tc = 0; tc < tc_count; tc++) {
		/* Priority XOFF Transmitted */
		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
		/* Priority XOFF Received */
		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
				      u16 *max, u8 *bwg_id, u8 *tsa,
				      u8 *map)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8  i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/*
	 * Map all user priorities (UPs) to traffic classes (TCs). map[] is
	 * indexed by UP and holds the TC that UP is assigned to, e.g. if
	 * priorities 6 and 7 are assigned to TC 5, then map[6] = map[7] = 5.
	 */
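	/*
	 * Each UP gets its own field in RTRUP2TC, spaced
	 * IXGBE_RTRUP2TC_UP_SHIFT bits apart, so the loop below packs the
	 * whole map[] into a single register value.
	 */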
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa)
{
	u32 reg, max_credits;
	u8  i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
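	/*
	 * RTTDQSEL selects which Tx queue the subsequent RTTDT1C (per-queue
	 * credit) write targets; writing 0 clears that queue's credits.
	 */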
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= refill[i];
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTDT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa,
					   u8 *map)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/*
	 * Map all user priorities (UPs) to traffic classes (TCs). map[] is
	 * indexed by UP and holds the TC that UP is assigned to, e.g. if
	 * priorities 6 and 7 are assigned to TC 5, then map[6] = map[7] = 5.
	 */
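	/* RTTUP2TC packs one field per UP, like RTRUP2TC in the Rx arbiter. */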
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTPT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
	u32 i, j, fcrtl, reg;
	u8 max_tc = 0;

	/* Enable Transmit Priority Flow Control */
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

	/* Enable Receive Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg |= IXGBE_MFLCN_DPF;

	/*
	 * X540 supports per TC Rx priority flow control.  So
	 * clear all TCs and only enable those that should be
	 * enabled.
	 */
	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	if (hw->mac.type == ixgbe_mac_X540)
		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

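	/*
	 * 82599 exposes only a single Rx PFC enable bit, so set it when any
	 * priority has PFC enabled; X540 got its per-TC mask above.
	 */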
	if (pfc_en)
		reg |= IXGBE_MFLCN_RPFCE;

	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

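	/* Find the highest TC referenced by the UP-to-TC map */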
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
		if (map[i] > max_tc)
			max_tc = map[i];
	}

	/* Configure PFC Tx thresholds per TC */
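	/*
	 * The water marks appear to be kept in kilobytes; shifting left by
	 * 10 converts them to the byte thresholds FCRTH/FCRTL expect.
	 */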
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
			if ((map[j] == i) && (pfc_en & (1 << j))) {
				enabled = 1;
				break;
			}
		}

		if (enabled) {
			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
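	/*
	 * Refresh XOFF at half the pause interval so pause does not lapse
	 * while the receive buffers are still congested.
	 */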
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
				    struct ixgbe_dcb_config *dcb_config)
{
	u32 reg = 0;
	u8  i   = 0;
	u8 tc_count = 8;
	bool vt_mode = FALSE;

	if (dcb_config != NULL) {
		tc_count = dcb_config->num_tcs.pg_tcs;
		vt_mode = dcb_config->vt_mode;
	}

	if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
		return IXGBE_ERR_PARAM;

	if (tc_count == 8 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
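		/*
		 * Each byte of RQSMR selects the stats counter for one queue,
		 * so 0x01010101 * n tags all four queues covered by register
		 * 'i' with counter n = i / 4.
		 */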
		for (i = 0; i < 32; i++) {
			reg = 0x01010101 * (i / 4);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 32, 32, 16, 16, 8, 8, 8, 8.
		 */
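		/*
		 * TQSM(i) covers Tx queues 4*i .. 4*i+3, so registers 0-7
		 * map TC0's 32 queues to counter 0, registers 8-15 map TC1's
		 * 32 queues to counter 1, and so on down the allocation above.
		 */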
		for (i = 0; i < 32; i++) {
			if (i < 8)
				reg = 0x00000000;
			else if (i < 16)
				reg = 0x01010101;
			else if (i < 20)
				reg = 0x02020202;
			else if (i < 24)
				reg = 0x03030303;
			else if (i < 26)
				reg = 0x04040404;
			else if (i < 28)
				reg = 0x05050505;
			else if (i < 30)
				reg = 0x06060606;
			else
				reg = 0x07070707;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			if (i % 8 > 3)
				/* In 4 TC mode, odd 16-queue ranges are
				 * not used.
				 */
				continue;
			reg = 0x01010101 * (i / 8);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 64, 32, 16, 16.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 16)
				reg = 0x00000000;
			else if (i < 24)
				reg = 0x01010101;
			else if (i < 28)
				reg = 0x02020202;
			else
				reg = 0x03030303;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == TRUE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
		 * pool. Set all 32 queues of each TC across pools to the same
		 * stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
		 * pool. Set all 32 queues of each TC across pools to the same
		 * stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
			   struct ixgbe_dcb_config *dcb_config)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	if (dcb_config->num_tcs.pg_tcs == 8) {
		/* Enable DCB for Rx with 8 TCs */
		switch (reg & IXGBE_MRQC_MRQE_MASK) {
		case 0:
		case IXGBE_MRQC_RT4TCEN:
			/* RSS disabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
			break;
		case IXGBE_MRQC_RSSEN:
		case IXGBE_MRQC_RTRSS4TCEN:
			/* RSS enabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS8TCEN;
			break;
		default:
			/*
			 * Unsupported value, assume stale data and
			 * overwrite with the no-RSS, 8 TC setting
			 */
			ASSERT(0);
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
		}
	}
	if (dcb_config->num_tcs.pg_tcs == 4) {
		/* We support both VT-on and VT-off with 4 TCs. */
		if (dcb_config->vt_mode)
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_VMDQRT4TCEN;
		else
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS4TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	if (dcb_config->num_tcs.pg_tcs == 8)
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	else {
		/* We support both VT-on and VT-off with 4 TCs. */
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		if (dcb_config->vt_mode)
			reg |= IXGBE_MTQC_VT_ENA;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
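	/*
	 * QDE is written indirectly: IXGBE_QDE_WRITE latches the value for
	 * the queue named in the index field, and leaving the enable bit
	 * clear turns drop off for that queue.
	 */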
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable Security TX Buffer IFG for DCB */
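	/*
	 * DCB appears to require a larger minimum inter-frame gap in the
	 * security Tx buffer; IXGBE_SECTX_DCB programs that gap.
	 */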
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg |= IXGBE_SECTX_DCB;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure DCB settings and enable DCB mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
			      u8 *map)
{

	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
					  map);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
					       tsa);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
					       tsa, map);

	return IXGBE_SUCCESS;
}