/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
/**
 * ixgbe_dcb_get_tc_stats_82599 - Return statistics for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the statistics for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
                                 struct ixgbe_hw_stats *stats,
                                 u8 tc_count)
{
	int tc;

	if (tc_count > MAX_TRAFFIC_CLASS)
		return DCB_ERR_PARAM;
	/* Statistics pertaining to each traffic class */
	for (tc = 0; tc < tc_count; tc++) {
		/* Transmitted Packets */
		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
		/* Transmitted Bytes */
		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
		/* Received Packets */
		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
		/* Received Bytes */
		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
	}

	return 0;
}
/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
                                  struct ixgbe_hw_stats *stats,
                                  u8 tc_count)
{
	int tc;

	if (tc_count > MAX_TRAFFIC_CLASS)
		return DCB_ERR_PARAM;
	for (tc = 0; tc < tc_count; tc++) {
		/* Priority XOFF Transmitted */
		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
		/* Priority XOFF Received */
		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
	}

	return 0;
}
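/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * driver): both getters above accumulate into the caller's ixgbe_hw_stats,
 * so a driver would typically keep one stats structure and refresh it
 * periodically. The helper name below is hypothetical.
 */
static inline s32 example_refresh_dcb_stats_82599(struct ixgbe_hw *hw,
                                                  struct ixgbe_hw_stats *stats)
{
	s32 ret;

	/* Accumulate per-TC transmit/receive packet and byte counters */
	ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, MAX_TRAFFIC_CLASS);
	if (ret)
		return ret;

	/* Accumulate per-TC PFC XOFF counters */
	return ixgbe_dcb_get_pfc_stats_82599(hw, stats, MAX_TRAFFIC_CLASS);
}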
/**
 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure packet buffers for DCB mode.
 */
s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
                                          struct ixgbe_dcb_config *dcb_config)
{
	s32 ret_val = 0;
	u32 value = IXGBE_RXPBSIZE_64KB;
	u8  i = 0;

	/* Setup Rx packet buffer sizes */
	switch (dcb_config->rx_pba_cfg) {
	case pba_80_48:
		/* Setup the first four at 80KB */
		value = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
		/* Setup the last four at 48KB...don't re-init i */
		value = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case pba_equal:
	default:
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);

		/* Setup Tx packet buffer sizes */
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
			                IXGBE_TXPBSIZE_20KB);
			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i),
			                IXGBE_TXPBTHRESH_DCB);
		}
		break;
	}

	return ret_val;
}
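/*
 * Editor's note (worked example): with pba_80_48 the receive packet buffer
 * is split as 4 x 80 KB for TC0-TC3 plus 4 x 48 KB for TC4-TC7
 * (4*80 + 4*48 = 512 KB total), while pba_equal gives 8 x 64 KB; each of
 * the 8 Tx packet buffers is set to 20 KB with the DCB threshold.
 */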
/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
                                      struct ixgbe_dcb_config *dcb_config)
{
	struct tc_bw_alloc    *p;
	u32    reg           = 0;
	u32    credit_refill = 0;
	u32    credit_max    = 0;
	u8     i             = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/* Map each user priority to its traffic class, 1 to 1 */
	reg = 0;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];

		credit_refill = p->data_credits_refill;
		credit_max    = p->data_credits_max;
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (p->prio_type == prio_link)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return 0;
}
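/*
 * Editor's note (worked example): the UP-to-TC map above packs one field per
 * user priority; assuming IXGBE_RTRUP2TC_UP_SHIFT is 3, the 1:1 mapping
 * resolves to
 *	reg = 0<<0 | 1<<3 | 2<<6 | 3<<9 | 4<<12 | 5<<15 | 6<<18 | 7<<21
 *	    = 0x00FAC688
 * i.e. user priority n is steered to traffic class n.
 */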
/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
                                           struct ixgbe_dcb_config *dcb_config)
{
	struct tc_bw_alloc *p;
	u32    reg, max_credits;
	u8     i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
		max_credits = dcb_config->tc_config[i].desc_credits_max;
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= p->data_credits_refill;
		reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (p->prio_type == prio_group)
			reg |= IXGBE_RTTDT2C_GSP;

		if (p->prio_type == prio_link)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return 0;
}
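/*
 * Editor's note (assumption): RTTDQSEL/RTTDT1C above appear to form an
 * indirect register pair, with RTTDQSEL selecting one of the 128 Tx queues
 * and the following RTTDT1C write applying to that queue, which is why the
 * loop issues the two writes back to back for every queue index.
 */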
/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
                                           struct ixgbe_dcb_config *dcb_config)
{
	struct tc_bw_alloc *p;
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/* Map each user priority to its traffic class, 1 to 1 */
	reg = 0;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
		reg = p->data_credits_refill;
		reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (p->prio_type == prio_group)
			reg |= IXGBE_RTTPT2C_GSP;

		if (p->prio_type == prio_link)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return 0;
}
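/*
 * Editor's note: as with the Rx and Tx descriptor planes above, the packet
 * plane is reprogrammed with IXGBE_RTTPCS_ARBDIS set and then re-enabled by
 * rewriting RTTPCS with the same mode bits minus the ARBDIS flag.
 */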
/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
                               struct ixgbe_dcb_config *dcb_config)
{
	u32 i, reg, rx_pba_size;

	/* If PFC is disabled globally then fall back to LFC. */
	if (!dcb_config->pfc_mode_enable) {
		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
			hw->mac.ops.fc_enable(hw, i);
		goto out;
	}

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if (dcb_config->rx_pba_cfg == pba_equal)
			rx_pba_size = IXGBE_RXPBSIZE_64KB;
		else
			rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
			                      : IXGBE_RXPBSIZE_48KB;

		reg = ((rx_pba_size >> 5) & 0xFFE0);
		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
			reg |= IXGBE_FCRTL_XONE;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);

		reg = ((rx_pba_size >> 2) & 0xFFE0);
		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
			reg |= IXGBE_FCRTH_FCEN;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	/* Enable Transmit PFC */
	reg = IXGBE_FCCFG_TFCE_PRIORITY;
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);

	/*
	 * Enable Receive PFC
	 * We will always honor XOFF frames we receive when
	 * we are in PFC mode.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg &= ~IXGBE_MFLCN_RFCE;
	reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
out:
	return 0;
}
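/*
 * Editor's note (worked example): the water marks above are derived from the
 * Rx packet buffer size. Assuming IXGBE_RXPBSIZE_64KB is 0x10000, the XON
 * (FCRTL) threshold works out to (0x10000 >> 5) & 0xFFE0 = 0x800 (2 KB) and
 * the XOFF (FCRTH) threshold to (0x10000 >> 2) & 0xFFE0 = 0x4000 (16 KB);
 * the XONE/FCEN enable bits are OR'd in only for TCs with PFC enabled.
 */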
/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 *
 * Configure the queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
	u32 reg = 0;
	u8  i   = 0;

	/*
	 * Receive queue stats setting:
	 * 32 RQSMR registers, each configuring 4 queues.
	 * Set all 16 queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 */
	for (i = 0; i < 32; i++) {
		reg = 0x01010101 * (i / 4);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
	}
	/*
	 * Transmit queue stats setting:
	 * 32 TQSM registers, each controlling 4 queues.
	 * Set all queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 * Tx queues are allocated non-uniformly to TCs:
	 * 32, 32, 16, 16, 8, 8, 8, 8.
	 */
	for (i = 0; i < 32; i++) {
		if (i < 8)
			reg = 0x00000000;
		else if (i < 16)
			reg = 0x01010101;
		else if (i < 20)
			reg = 0x02020202;
		else if (i < 24)
			reg = 0x03030303;
		else if (i < 26)
			reg = 0x04040404;
		else if (i < 28)
			reg = 0x05050505;
		else if (i < 30)
			reg = 0x06060606;
		else
			reg = 0x07070707;
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
	}

	return 0;
}
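/*
 * Editor's note (worked example): each RQSMR byte selects the statistics
 * counter for one Rx queue, so reg = 0x01010101 * (i / 4) yields
 * RQSMR[0..3] = 0x00000000 (queues 0-15 -> stat 0 / TC0),
 * RQSMR[4..7] = 0x01010101 (queues 16-31 -> stat 1 / TC1), and so on.
 * The TQSM table above mirrors the non-uniform Tx allocation: 8 registers
 * (32 queues) each for TC0/TC1, 4 registers (16 queues) each for TC2/TC3,
 * and 2 registers (8 queues) for each of TC4-TC7.
 */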
/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable DCB for Rx with 8 TCs */
	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	switch (reg & IXGBE_MRQC_MRQE_MASK) {
	case 0:
	case IXGBE_MRQC_RT4TCEN:
		/* RSS disabled cases */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
		break;
	case IXGBE_MRQC_RSSEN:
	case IXGBE_MRQC_RTRSS4TCEN:
		/* RSS enabled cases */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
		break;
	default:
		/* Unsupported value; assume stale data and fall back
		 * to DCB without RSS */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return 0;
}
/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure DCB settings and enable DCB mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
                              struct ixgbe_dcb_config *dcb_config)
{
	ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
	ixgbe_dcb_config_82599(hw);
	ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
	ixgbe_dcb_config_pfc_82599(hw, dcb_config);
	ixgbe_dcb_config_tc_stats_82599(hw);

	return 0;
}
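/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * driver): ixgbe_dcb_hw_config_82599() expects a populated ixgbe_dcb_config.
 * The minimal, hypothetical setup below only touches fields this file
 * consumes (Rx buffer layout, PFC mode, per-TC bandwidth group, priority
 * type and PFC state); credit values would normally be derived beforehand
 * by the generic DCB credit calculation, which is omitted here.
 */
static inline s32 example_enable_dcb_82599(struct ixgbe_hw *hw,
                                           struct ixgbe_dcb_config *cfg)
{
	u8 i;

	cfg->rx_pba_cfg = pba_equal;
	cfg->pfc_mode_enable = true;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		cfg->tc_config[i].path[DCB_TX_CONFIG].bwg_id = 0;
		cfg->tc_config[i].path[DCB_TX_CONFIG].prio_type = prio_group;
		cfg->tc_config[i].path[DCB_RX_CONFIG].bwg_id = 0;
		cfg->tc_config[i].path[DCB_RX_CONFIG].prio_type = prio_group;
		cfg->tc_config[i].dcb_pfc = pfc_enabled_full;
	}

	return ixgbe_dcb_hw_config_82599(hw, cfg);
}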