/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/qib/
/*
 * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/netdevice.h>

#include "qib.h"
#include "qib_common.h"

/* default pio off, sdma on */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30
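
/*
 * Layout of descriptor qword 0 as assembled by make_sdma_desc() below
 * (qword 1 carries SDmaPhyAddr[47:32]):
 *
 *   [63:32] SDmaPhyAddr[31:0]
 *   [31:30] SDmaGeneration[1:0]
 *   [26:16] SDmaDwordCount[10:0]
 *   [15:11] INTR, USE_LARGE_BUF, DMA_HEAD, FIRST, LAST flag bits
 *   [10:0]  SDmaBufOffset[12:2]
 */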

char *qib_sdma_state_names[] = {
	[qib_sdma_state_s00_hw_down]          = "s00_HwDown",
	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
	[qib_sdma_state_s20_idle]             = "s20_Idle",
	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
	[qib_sdma_state_s99_running]          = "s99_Running",
};

char *qib_sdma_event_names[] = {
	[qib_sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
	[qib_sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
	[qib_sdma_event_e20_hw_started]   = "e20_HwStarted",
	[qib_sdma_event_e30_go_running]   = "e30_GoRunning",
	[qib_sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
	[qib_sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
	[qib_sdma_event_e60_hw_halted]    = "e60_HwHalted",
	[qib_sdma_event_e70_go_idle]      = "e70_GoIdle",
	[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
	[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
	[qib_sdma_event_e90_timer_tick]   = "e90_TimerTick",
};

/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);

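/*
 * Reference counting on the sdma state: sdma_get() takes a reference
 * when the state machine is started, sdma_put() drops it when the
 * machine is torn down, and sdma_finalput() drops the final reference
 * and waits for the completion so qib_teardown_sdma() knows the state
 * machine has fully stopped.
 */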
static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing.  Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down.  This matches what is done for requests
 * that complete normally; it's just the full list.
 *
 * Must be called with sdma_lock held.
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/* Process all retired requests. */
	qib_sdma_make_progress(ppd);

	clear_sdma_activelist(ppd);

	/*
	 * Resync count of added and removed.  It is VERY important that
	 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
	 */
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers will be reset when switching states
	 * due to calling __qib_sdma_process_event() below.
	 */
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
 * as a result of send buffer errors or send DMA descriptor errors.
 * We want to disarm the buffers in these cases.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}

static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}

static void sdma_set_state(struct qib_pportdata *ppd,
	enum qib_sdma_states next_state)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	struct sdma_set_state_action *action = ss->set_state_action;
	unsigned op = 0;

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;

	ss->current_state = next_state;

	if (action[next_state].op_enable)
		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= QIB_SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_drain)
		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;

	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}

static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

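	/*
	 * Undo the layout built by make_sdma_desc(): qword 1 holds
	 * SDmaPhyAddr[47:32], qword 0 bits [63:32] hold SDmaPhyAddr[31:0],
	 * and shifting the dword count field (bits [26:16]) down by 14
	 * leaves it pre-multiplied by 4, i.e. the length in bytes.
	 */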
	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

static int alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt;
	if (!ppd->sdma_descq_cnt)
		ppd->sdma_descq_cnt = 256;

	/* Allocate memory for SendDMA descriptor FIFO */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
		GFP_KERNEL);

	if (!ppd->sdma_descq) {
		qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
			    "FIFO memory\n");
		goto bail;
	}

	/* Allocate memory for DMA of head register to memory */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		qib_dev_err(ppd->dd, "failed to allocate SendDMA "
			    "head memory\n");
		goto cleanup_descq;
	}
	ppd->sdma_head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
		ppd->sdma_descq_phys);
	ppd->sdma_descq = NULL;
	ppd->sdma_descq_phys = 0;
bail:
	ppd->sdma_descq_cnt = 0;
	return -ENOMEM;
}

static void free_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	if (ppd->sdma_head_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *)ppd->sdma_head_dma,
				  ppd->sdma_head_phys);
		ppd->sdma_head_dma = NULL;
		ppd->sdma_head_phys = 0;
	}

	if (ppd->sdma_descq) {
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt * sizeof(u64[2]),
				  ppd->sdma_descq, ppd->sdma_descq_phys);
		ppd->sdma_descq = NULL;
		ppd->sdma_descq_phys = 0;
	}
}

static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{
	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}

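/*
 * Advance the software head to catch up with the hardware head read
 * from the chip, unmapping descriptors and firing completion callbacks
 * for each transmit request that has fully retired, then tell the
 * verbs layer how many descriptor slots are now free.
 */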
/* sdma_lock must be held */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if desc is part of this txp, unmap if needed */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* increment dequeued desc count */
		ppd->sdma_descq_removed++;

		/* advance head, wrap if needed */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from active list */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
					list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}

/*
 * This is called from interrupt context.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd))
		qib_sdma_make_progress(ppd);
}

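/*
 * Allocate the descriptor queue and the page used for DMA of the head
 * register, put the state machine into s00_HwDown with a fresh
 * kref/completion, set up the software clean-up tasklet and the chip
 * registers, then kick the state machine with e10_go_hw_start.
 */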
int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* set consistent sdma state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
		(unsigned long)ppd);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

	/*
	 * This waits for the state machine to exit so it is not
	 * necessary to kill the sdma_sw_clean_up_task to make sure
	 * it is not running.
	 */
	sdma_finalput(&ppd->sdma_state);

	free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/*
 * Complete a request when sdma is not running; this is likely the only
 * request, but to simplify the code we always queue it and then process
 * the full activelist.  We process the entire list to ensure that this
 * particular request does get its callback, and in the correct order.
 * Must be called with sdma_lock held.
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{
	atomic_inc(&tx->qp->s_dma_busy);
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}

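/*
 * Flow: a first descriptor is built from the packet header address in
 * tx->txreq.addr, then one descriptor is added per dma_map_single()'d
 * chunk of the SGE list; the final descriptor gets SDMA_DESC_LAST
 * (plus DMA_HEAD/INTR if requested), the chip tail is updated and the
 * request is put on sdma_activelist.  If there are not enough free
 * descriptors, the QP is queued on dev->dmawait and -EBUSY is returned;
 * if a mapping fails, the descriptors written so far are unwound and
 * the request completes with IB_WC_GENERAL_ERR; if sdma is not running,
 * the request is aborted via complete_sdma_err_req().
 */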
/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
			struct qib_sge_state *ss, u32 dwords,
			struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct qib_sge *sge;
	struct qib_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
					ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

	/* write to the descq */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	/* increment the tail */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
		++ppd->sdma_generation;
	}

	tx->txreq.start_idx = tail;

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
				      dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
			goto unmap;
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);

	atomic_inc(&tx->qp->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
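	/*
	 * A dma_map_single() failed: walk the tail back toward the old
	 * sdma_descq_tail, unmapping each descriptor written so far.
	 * The chip tail register was never advanced, so the hardware
	 * has not seen any of these descriptors.
	 */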
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	qib_put_txreq(tx);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
			qib_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	/* return zero to process the next send work request */
	goto unlock;

busy:
	qp = tx->qp;
	spin_lock(&qp->s_lock);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		qp->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			struct qib_ibport *ibp;

			ibp = &ppd->ibport_data;
			ibp->n_dmawait++;
			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
			list_add_tail(&qp->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}

void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

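/*
 * Summary of the main transitions implemented below:
 *
 *   s00_HwDown        --e10/e30-->   s10_HwStartUpWait (takes the kref)
 *   s10_HwStartUpWait --e20-->       s20_Idle or s99_Running
 *   s20_Idle          --e30-->       s99_Running
 *   s99_Running       --e60/e7220--> s30_SwCleanUpWait
 *   s99_Running       --e70/e7322--> s50_HwHaltWait
 *   s50_HwHaltWait    --e60-->       s40_HwCleanUpWait
 *   s40_HwCleanUpWait --e50-->       s30_SwCleanUpWait
 *   s30_SwCleanUpWait --e40-->       s10_HwStartUpWait
 *   any state         --e00-->       s00_HwDown (tearing down)
 */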
void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually the
			 * result of link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on
			 * the 7220, e.g.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			sdma_set_state(ppd, ss->go_s99_running ?
				       qib_sdma_state_s99_running :
				       qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	ss->last_event = event;
}