/*
 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include "qib.h"

static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
		 "duration of user app suspension after link failure");

unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
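
/*
 * Both parameters are read-only at runtime (S_IRUGO) and take effect at
 * module load, e.g. (illustrative values, assuming the driver's usual
 * ib_qib module name):
 *
 *	modprobe ib_qib hol_timeout_ms=5000 fetch_arb=0
 */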

/**
 * qib_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the qlogic_ib device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Cancel a range of PIO buffers. Used at user process close,
 * in case it died while writing to a PIO buffer.
 */
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
	unsigned long flags;
	unsigned i;
	unsigned last;

	last = first + cnt;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = first; i < last; i++) {
		__clear_bit(i, dd->pio_need_disarm);
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * This is called by a user process when it sees the DISARM_BUFS event
 * bit is set.
 */
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned i;
	unsigned last;

	last = rcd->pio_base + rcd->piocnt;
	/*
	 * Don't need uctxt_lock here, since user has called in to us.
	 * Clear at start in case more interrupts set bits while we
	 * are disarming.
	 */
	if (rcd->user_event_mask) {
		/*
		 * subctxt_cnt is 0 if not shared, so do base
		 * separately, first, then remaining subctxt, if any
		 */
		clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
		for (i = 1; i < rcd->subctxt_cnt; i++)
			clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				  &rcd->user_event_mask[i]);
	}
	spin_lock_irq(&dd->pioavail_lock);
	for (i = rcd->pio_base; i < last; i++) {
		if (__test_and_clear_bit(i, dd->pio_need_disarm))
			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irq(&dd->pioavail_lock);
	return 0;
}

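/*
 * Return the port whose SDMA engine owns send buffer i, i.e. the port
 * for which i falls in [first_sendbuf, last_sendbuf), or NULL if the
 * buffer is not an SDMA send buffer on any port.
 */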
static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		ppd = dd->pport + pidx;
		if (i >= ppd->sdma_state.first_sendbuf &&
		    i < ppd->sdma_state.last_sendbuf)
			return ppd;
	}
	return NULL;
}

/*
 * Return true if send buffer is being used by a user context.
 * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect.
 */
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&dd->uctxt_lock);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		rcd = dd->rcd[ctxt];
		if (!rcd || bufn < rcd->pio_base ||
		    bufn >= rcd->pio_base + rcd->piocnt)
			continue;
		if (rcd->user_event_mask) {
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&dd->uctxt_lock);

	return ret;
}

/*
 * Disarm a set of send buffers.  If the buffer might be actively being
 * written to, mark the buffer to be disarmed later when it is not being
 * written to.
 *
 * This should only be called from the IRQ error handler.
 */
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
			    unsigned cnt)
{
	struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
	unsigned i;
	unsigned long flags;

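	/*
	 * pppd[] collects the ports whose SDMA engines owned buffers in
	 * the error mask, so qib_cancel_sends() below runs once per port.
	 */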
	for (i = 0; i < dd->num_pports; i++)
		pppd[i] = NULL;

	for (i = 0; i < cnt; i++) {
		if (!test_bit(i, mask))
			continue;
		/*
		 * If the buffer is owned by the DMA hardware,
		 * reset the DMA engine.
		 */
		ppd = is_sdma_buf(dd, i);
		if (ppd) {
			pppd[ppd->port] = ppd;
			continue;
		}
		/*
		 * If the kernel is writing the buffer or the buffer is
		 * owned by a user process, we can't clear it yet.
		 */
		spin_lock_irqsave(&dd->pioavail_lock, flags);
		if (test_bit(i, dd->pio_writing) ||
		    (!test_bit(i << 1, dd->pioavailkernel) &&
		     find_ctxt(dd, i))) {
			__set_bit(i, dd->pio_need_disarm);
		} else {
			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
		}
		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
	}

	/* do cancel_sends once per port that had sdma piobufs in error */
	for (i = 0; i < dd->num_pports; i++)
		if (pppd[i])
			qib_cancel_sends(pppd[i]);
}

/**
 * update_send_bufs - update shadow copy of the PIO availability map
 * @dd: the qlogic_ib device
 *
 * called whenever our local copy indicates we have run out of send buffers
 */
static void update_send_bufs(struct qib_devdata *dd)
{
	unsigned long flags;
	unsigned i;
	const unsigned piobregs = dd->pioavregs;

	/*
	 * If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomically, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple CPUs simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64-bit memory location, so by definition the update
	 * is atomic in terms of what other CPUs can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only CPU overhead, not
	 * latency or bandwidth, is affected.
	 */
	if (!dd->pioavailregs_dma)
		return;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

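		/*
		 * piov is the fresh availability word DMA'd by the chip;
		 * pchg selects kernel-owned check-bit positions where the
		 * shadow generation bit now matches the chip's copy, and
		 * pchbusy shifts those up to the busy-bit positions.  The
		 * shadow word is rewritten only if one of those busy bits
		 * is still set in it.
		 */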
		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
		pchg = dd->pioavailkernel[i] &
			~(dd->pioavailshadow[i] ^ piov);
		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
			pnew = dd->pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * Debugging code and stats updates if no pio buffers available.
 */
static noinline void no_send_bufs(struct qib_devdata *dd)
{
	dd->upd_pio_shadow = 1;

	/* not atomic, but if we lose a stat count in a while, that's OK */
	qib_stats.sps_nopiobufs++;
}

/*
 * Common code for normal driver send buffer allocation, and reserved
 * allocation.
 *
 * Do appropriate marking as busy, etc.
 * Returns buffer pointer if one is found, otherwise NULL.
 */
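/*
 * Shadow layout: each send buffer n occupies two adjacent bits in
 * pioavailshadow, bit 2n (the generation/"check" bit) and bit 2n+1
 * (the busy bit).  The scan below claims a buffer by setting its busy
 * bit, and flips its generation bit so update_send_bufs() can tell
 * when the chip has consumed the buffer.
 */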
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
				  u32 first, u32 last)
{
	unsigned i, j, updated = 0;
	unsigned nbufs;
	unsigned long flags;
	unsigned long *shadow = dd->pioavailshadow;
	u32 __iomem *buf;

	if (!(dd->flags & QIB_PRESENT))
		return NULL;

	nbufs = last - first + 1; /* number in range to check */
	if (dd->upd_pio_shadow) {
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid.
		 */
		update_send_bufs(dd);
		updated++;
	}
	i = first;
rescan:
	/*
	 * While test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (j = 0; j < nbufs; j++, i++) {
		if (i > last)
			i = first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		/* remember that the buffer can be written to now */
		__set_bit(i, dd->pio_writing);
		break;
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	if (j == nbufs) {
		if (!updated) {
			/*
			 * First time through; shadow exhausted, but may be
			 * buffers available, try an update and then rescan.
			 */
			update_send_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		}
		no_send_bufs(dd);
		buf = NULL;
	} else {
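		/*
		 * Map the buffer number to its mapped address: the first
		 * piobcnt2k buffers are in the 2K region at palign stride,
		 * the next piobcnt4k in the 4K region at align4k stride,
		 * and any remainder in the separate VL15 region, on chips
		 * that set piovl15base.
		 */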
		if (i < dd->piobcnt2k)
			buf = (u32 __iomem *)(dd->pio2kbase +
				i * dd->palign);
		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
			buf = (u32 __iomem *)(dd->pio4kbase +
				(i - dd->piobcnt2k) * dd->align4k);
		else
			buf = (u32 __iomem *)(dd->piovl15base +
				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
				dd->align4k);
		if (pbufnum)
			*pbufnum = i;
		dd->upd_pio_shadow = 0;
	}

	return buf;
}
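
/*
 * Typical caller pattern (an illustrative sketch only; the real callers
 * are the chip-specific send paths):
 *
 *	u32 bufn;
 *	u32 __iomem *piobuf;
 *
 *	piobuf = qib_getsendbuf_range(dd, &bufn, first, last);
 *	if (piobuf) {
 *		... copy the PBC word and packet data into piobuf ...
 *		qib_sendbuf_done(dd, bufn);
 *	}
 *
 * qib_sendbuf_done() clears pio_writing again so a pending disarm of
 * the buffer can proceed.
 */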

/*
 * Record that the caller is finished writing to the buffer so we don't
 * disarm it while it is being written and disarm it now if needed.
 */
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	__clear_bit(n, dd->pio_writing);
	if (__test_and_clear_bit(n, dd->pio_need_disarm))
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/**
 * qib_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the qlogic_ib device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 * @rcd: the context pointer (passed through to f_txchk_change)
 */
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
	unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
	unsigned long flags;
	unsigned end;
	unsigned ostart = start;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;
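	/* e.g. buffer 3 maps to shadow bits 6 (generation) and 7 (busy) */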

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	/* Set or clear the busy bit in the shadow. */
	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i;

			/*
			 * The BUSY bit will never be set, because we disarm
			 * the user buffers before we hand them back to the
			 * kernel.  We do have to make sure the generation
			 * bit is set correctly in shadow, since it could
			 * have changed many times while allocated to user.
			 * We can't use the bitmap functions on the full
			 * dma array because it is always little-endian, so
			 * we have to flip to host-order first.
			 * BITS_PER_LONG is slightly wrong, since it's
			 * always 64 bits per register in chip...
			 * We only work on 64 bit kernels, so that's OK.
			 */
			i = start / BITS_PER_LONG;
			__clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
				    dd->pioavailshadow);
			dma = (unsigned long)
				le64_to_cpu(dd->pioavailregs_dma[i]);
			if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
				      start) % BITS_PER_LONG, &dma))
				__set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
					  start, dd->pioavailshadow);
			else
				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
					    + start, dd->pioavailshadow);
			__set_bit(start, dd->pioavailkernel);
		} else {
			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
				  dd->pioavailshadow);
			__clear_bit(start, dd->pioavailkernel);
		}
		start += 2;
	}

	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	dd->f_txchk_change(dd, ostart, len, avail, rcd);
}

/*
 * Flush all sends that might be in the ready to send state, as well as any
 * that are in the process of being sent.  Used whenever we need to be
 * sure the send side is idle.  Cleans up all buffer state by canceling
 * all pio buffers, and issuing an abort, which cleans up anything in the
 * launch fifo.  The cancel is superfluous on some chip versions, but
 * it's safer to always do it.
 * PIOAvail bits are updated by the chip as if a normal send had happened.
 */
void qib_cancel_sends(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	unsigned ctxt;
	unsigned i;
	unsigned last;

	/*
	 * Tell PSM to disarm buffers again before trying to reuse them.
	 * We need to be sure the rcd doesn't change out from under us
	 * while we do so.  We hold the two locks sequentially.  We might
	 * needlessly set some need_disarm bits as a result, if the
	 * context is closed after we release the uctxt_lock, but that's
	 * fairly benign, and safer than nesting the locks.
	 */
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		rcd = dd->rcd[ctxt];
		if (rcd && rcd->ppd == ppd) {
			last = rcd->pio_base + rcd->piocnt;
			if (rcd->user_event_mask) {
				/*
				 * subctxt_cnt is 0 if not shared, so do base
				 * separately, first, then remaining subctxt,
				 * if any
				 */
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[0]);
				for (i = 1; i < rcd->subctxt_cnt; i++)
					set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
						&rcd->user_event_mask[i]);
			}
			i = rcd->pio_base;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			spin_lock_irqsave(&dd->pioavail_lock, flags);
			for (; i < last; i++)
				__set_bit(i, dd->pio_need_disarm);
			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
		} else
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	}

	if (!(dd->flags & QIB_HAS_SEND_DMA))
		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
				    QIB_SENDCTRL_FLUSH);
}

/*
 * Force an update of the in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons.
 * If avail updates are already off, this routine is a nop, on the
 * assumption that the caller (or set of callers) will "do the right thing".
 * This is a per-device operation, so just the first port.
 */
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}

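/*
 * Head-of-line (HoL) blocking handling: while the link is not up, the
 * hol_timer periodically cancels sends so that a stuck packet cannot
 * block SMP replies; once the link is up, qib_hol_up() records the
 * state and the timer becomes a nop.
 */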
void qib_hol_down(struct qib_pportdata *ppd)
{
	/*
	 * Cancel sends when the link goes DOWN so that we aren't doing it
	 * at INIT when we might be trying to send SMI packets.
	 */
	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		qib_cancel_sends(ppd);
}

/*
 * Link is at INIT.
 * We start the HoL timer so we can detect stuck packets blocking SMP replies.
 * Timer may already be running, so use mod_timer, not add_timer.
 */
void qib_hol_init(struct qib_pportdata *ppd)
{
	if (ppd->hol_state != QIB_HOL_INIT) {
		ppd->hol_state = QIB_HOL_INIT;
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}

/*
 * Link is up.  Continue any user processes, and leave the timer
 * running if it is set; it does nothing once it sees the link is up.
 */
void qib_hol_up(struct qib_pportdata *ppd)
{
	ppd->hol_state = QIB_HOL_UP;
}

/*
 * This is only called via the timer.
 */
void qib_hol_event(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	/* If hardware error, etc, skip. */
	if (!(ppd->dd->flags & QIB_INITTED))
		return;

	if (ppd->hol_state != QIB_HOL_UP) {
		/*
		 * Try to flush sends in case a stuck packet is blocking
		 * SMP replies.
		 */
		qib_hol_down(ppd);
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}