1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 *     Redistribution and use in source and binary forms, with or
12 *     without modification, are permitted provided that the following
13 *     conditions are met:
14 *
15 *      - Redistributions of source code must retain the above
16 *        copyright notice, this list of conditions and the following
17 *        disclaimer.
18 *
19 *      - Redistributions in binary form must reproduce the above
20 *        copyright notice, this list of conditions and the following
21 *        disclaimer in the documentation and/or other materials
22 *        provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include "ipath_kernel.h"
35
36struct infinipath_stats ipath_stats;
37
/**
 * ipath_snap_cntr - snapshot a chip counter
 * @dd: the infinipath device
 * @creg: the counter to snapshot
 *
 * called from add_timer and user counter read calls, to deal with
 * counters that wrap in "human time".  The words sent and received, and
 * the packets sent and received are all that we worry about.  For now,
 * at least, we don't worry about error counters, because if they wrap
 * that quickly, we probably don't care.  We may eventually just make this
 * handle all the counters.  Word counters can wrap in about 20 seconds
 * of full bandwidth traffic, packet counters in a few hours.
 */
u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
{
	/* reg64 is a flag: non-zero when the counter was read as 64 bits */
	u32 val, reg64 = 0;
	u64 val64;
	unsigned long t0, t1;
	u64 ret;

	t0 = jiffies;
	/* If fast increment counters are only 32 bits, snapshot them,
	 * and maintain them as 64bit values in the driver */
	if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
	    (creg == dd->ipath_cregs->cr_wordsendcnt ||
	     creg == dd->ipath_cregs->cr_wordrcvcnt ||
	     creg == dd->ipath_cregs->cr_pktsendcnt ||
	     creg == dd->ipath_cregs->cr_pktrcvcnt)) {
		/* chip keeps the full 64 bits for these; read directly */
		val64 = ipath_read_creg(dd, creg);
		/* map an all-ones read (the failure pattern) onto the
		 * 32-bit sentinel tested in the timeout check below */
		val = val64 == ~0ULL ? ~0U : 0;
		reg64 = 1;
	} else			/* val64 just to keep gcc quiet... */
		val64 = val = ipath_read_creg32(dd, creg);
	/*
	 * See if a second has passed.  This is just a way to detect things
	 * that are quite broken.  Normally this should take just a few
	 * cycles (the check is for long enough that we don't care if we get
	 * pre-empted.)  An Opteron HT O read timeout is 4 seconds with
	 * normal NB values
	 */
	t1 = jiffies;
	if (time_before(t0 + HZ, t1) && val == -1) {
		/* the read took over a second AND came back all-ones:
		 * treat it as a failed chip read, not a real value */
		ipath_dev_err(dd, "Error!  Read counter 0x%x timed out\n",
			      creg);
		ret = 0ULL;
		goto bail;
	}
	/* 64-bit counters can't wrap in "human time"; no software
	 * accumulation is needed, return the raw chip value */
	if (reg64) {
		ret = val64;
		goto bail;
	}

	/*
	 * Fold the 32-bit snapshot into the driver-maintained 64-bit
	 * total for the four fast counters.  The unsigned subtraction
	 * of the previous snapshot handles a single wrap correctly.
	 */
	if (creg == dd->ipath_cregs->cr_wordsendcnt) {
		if (val != dd->ipath_lastsword) {
			dd->ipath_sword += val - dd->ipath_lastsword;
			dd->ipath_lastsword = val;
		}
		val64 = dd->ipath_sword;
	} else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
		if (val != dd->ipath_lastrword) {
			dd->ipath_rword += val - dd->ipath_lastrword;
			dd->ipath_lastrword = val;
		}
		val64 = dd->ipath_rword;
	} else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
		if (val != dd->ipath_lastspkts) {
			dd->ipath_spkts += val - dd->ipath_lastspkts;
			dd->ipath_lastspkts = val;
		}
		val64 = dd->ipath_spkts;
	} else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
		if (val != dd->ipath_lastrpkts) {
			dd->ipath_rpkts += val - dd->ipath_lastrpkts;
			dd->ipath_lastrpkts = val;
		}
		val64 = dd->ipath_rpkts;
	} else
		/* any other counter: return the 32-bit value as-is */
		val64 = (u64) val;

	ret = val64;

bail:
	return ret;
}
123
/**
 * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
 * @dd: the infinipath device
 *
 * Print the delta of egrfull/hdrqfull errors for kernel ports no more than
 * every 5 seconds.  User processes are printed at close, but the kernel
 * doesn't close, so...  Separate routine so it may be called from other
 * places someday, and so the function name is meaningful when printed by
 * _IPATH_INFO.
 */
133static void ipath_qcheck(struct ipath_devdata *dd)
134{
135	static u64 last_tot_hdrqfull;
136	size_t blen = 0;
137	char buf[128];
138
139	*buf = 0;
140	if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
141		blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
142				dd->ipath_pd[0]->port_hdrqfull -
143				dd->ipath_p0_hdrqfull);
144		dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
145	}
146	if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
147		blen += snprintf(buf + blen, sizeof buf - blen,
148				 "%srcvegrfull %llu",
149				 blen ? ", " : "",
150				 (unsigned long long)
151				 (ipath_stats.sps_etidfull -
152				  dd->ipath_last_tidfull));
153		dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
154	}
155
156	/*
157	 * this is actually the number of hdrq full interrupts, not actual
158	 * events, but at the moment that's mostly what I'm interested in.
159	 * Actual count, etc. is in the counters, if needed.  For production
160	 * users this won't ordinarily be printed.
161	 */
162
163	if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
164	    ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
165		blen += snprintf(buf + blen, sizeof buf - blen,
166				 "%shdrqfull %llu (all ports)",
167				 blen ? ", " : "",
168				 (unsigned long long)
169				 (ipath_stats.sps_hdrqfull -
170				  last_tot_hdrqfull));
171		last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
172	}
173	if (blen)
174		ipath_dbg("%s\n", buf);
175
176	if (dd->ipath_port0head != (u32)
177	    le64_to_cpu(*dd->ipath_hdrqtailptr)) {
178		if (dd->ipath_lastport0rcv_cnt ==
179		    ipath_stats.sps_port0pkts) {
180			ipath_cdbg(PKT, "missing rcv interrupts? "
181				   "port0 hd=%llx tl=%x; port0pkts %llx\n",
182				   (unsigned long long)
183				   le64_to_cpu(*dd->ipath_hdrqtailptr),
184				   dd->ipath_port0head,
185				   (unsigned long long)
186				   ipath_stats.sps_port0pkts);
187		}
188		dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
189	}
190}
191
/**
 * ipath_get_faststats - get word counters from chip before they overflow
 * @opaque: a pointer to the struct ipath_devdata for the infinipath device
 *
 * called from add_timer
 */
198void ipath_get_faststats(unsigned long opaque)
199{
200	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
201	u32 val;
202	static unsigned cnt;
203
204	/*
205	 * don't access the chip while running diags, or memory diags can
206	 * fail
207	 */
208	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
209	    ipath_diag_inuse)
210		/* but re-arm the timer, for diags case; won't hurt other */
211		goto done;
212
213	if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
214		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
215		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
216		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
217		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
218	}
219
220	ipath_qcheck(dd);
221
222	/*
223	 * deal with repeat error suppression.  Doesn't really matter if
224	 * last error was almost a full interval ago, or just a few usecs
225	 * ago; still won't get more than 2 per interval.  We may want
226	 * longer intervals for this eventually, could do with mod, counter
227	 * or separate timer.  Also see code in ipath_handle_errors() and
228	 * ipath_handle_hwerrors().
229	 */
230
231	if (dd->ipath_lasterror)
232		dd->ipath_lasterror = 0;
233	if (dd->ipath_lasthwerror)
234		dd->ipath_lasthwerror = 0;
235	if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs)
236	    && time_after(jiffies, dd->ipath_unmasktime)) {
237		char ebuf[256];
238		int iserr;
239		iserr = ipath_decode_err(ebuf, sizeof ebuf,
240				 (dd->ipath_maskederrs & ~dd->
241				  ipath_ignorederrs));
242		if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
243				~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
244				INFINIPATH_E_PKTERRS ))
245			ipath_dev_err(dd, "Re-enabling masked errors "
246				      "(%s)\n", ebuf);
247		else {
248			/*
249			 * rcvegrfull and rcvhdrqfull are "normal", for some
250			 * types of processes (mostly benchmarks) that send
251			 * huge numbers of messages, while not processing
252			 * them.  So only complain about these at debug
253			 * level.
254			 */
255			if (iserr)
256					ipath_dbg("Re-enabling queue full errors (%s)\n",
257							ebuf);
258			else
259				ipath_cdbg(ERRPKT, "Re-enabling packet"
260						" problem interrupt (%s)\n", ebuf);
261		}
262		dd->ipath_maskederrs = dd->ipath_ignorederrs;
263		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
264				 ~dd->ipath_maskederrs);
265	}
266
267	/* limit qfull messages to ~one per minute per port */
268	if ((++cnt & 0x10)) {
269		for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
270		     val--) {
271			if (dd->ipath_lastegrheads[val] != -1)
272				dd->ipath_lastegrheads[val] = -1;
273			if (dd->ipath_lastrcvhdrqtails[val] != -1)
274				dd->ipath_lastrcvhdrqtails[val] = -1;
275		}
276	}
277
278done:
279	mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
280}
281