/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>

#include "qib.h"
#include "qib_common.h"

/* default pio off, sdma on */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30
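
/*
 * Layout of a send DMA descriptor (two 64-bit qwords), as assembled by
 * make_sdma_desc() below:
 *   qw[0] bits 63:32 - SDmaPhyAddr[31:0]
 *   qw[0] bits 31:30 - SDmaGeneration[1:0]
 *   qw[0] bits 26:16 - SDmaDwordCount[10:0]
 *   qw[0] bits 15:11 - control flags defined above
 *   qw[0] bits 10:0  - SDmaBufOffset[12:2]
 *   qw[1] bits 15:0  - SDmaPhyAddr[47:32]
 */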

/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(struct tasklet_struct *);
static void unmap_desc(struct qib_pportdata *, unsigned);

static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing.  Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down.  This matches what is done for requests
 * that complete normally; it's just the full list.
 *
 * Must be called with sdma_lock held.
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}

static void sdma_sw_clean_up_task(struct tasklet_struct *t)
{
	struct qib_pportdata *ppd = from_tasklet(ppd, t,
						 sdma_sw_clean_up_task);
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/* Process all retired requests. */
	qib_sdma_make_progress(ppd);

	clear_sdma_activelist(ppd);

	/*
	 * Resync count of added and removed.  It is VERY important that
	 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
	 */
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers will be reset when switching states
	 * due to calling __qib_sdma_process_event() below.
	 */
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
 * as a result of send buffer errors or send DMA descriptor errors.
 * We want to disarm the buffers in these cases.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}

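/*
 * Defer the software clean up of the descriptor queue and active list
 * to tasklet (bottom-half) context; the actual work is done in
 * sdma_sw_clean_up_task().
 */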
static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}

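/*
 * Move the state machine to next_state and apply that state's action
 * table entry: build up the QIB_SDMA_SENDCTRL_OP_* bits and push them
 * to the chip via f_sdma_sendctrl().
 */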
static void sdma_set_state(struct qib_pportdata *ppd,
	enum qib_sdma_states next_state)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	struct sdma_set_state_action *action = ss->set_state_action;
	unsigned op = 0;

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;

	ss->current_state = next_state;

	if (action[next_state].op_enable)
		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= QIB_SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_drain)
		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;

	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}

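/*
 * Undo the dma_map_single() done when the descriptor at index "head"
 * was built: recover the bus address and byte length from the
 * descriptor qwords and unmap the buffer.
 */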
static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

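/*
 * Allocate the coherent DMA memory used by send DMA for this port:
 * the descriptor queue (16 bytes per entry) and one page for the
 * DMA'ed copy of the hardware head index.
 */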
static int alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt;
	if (!ppd->sdma_descq_cnt)
		ppd->sdma_descq_cnt = 256;

	/* Allocate memory for SendDMA descriptor FIFO */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
		GFP_KERNEL);

	if (!ppd->sdma_descq) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA descriptor FIFO memory\n");
		goto bail;
	}

	/* Allocate memory for DMA of head register to memory */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA head memory\n");
		goto cleanup_descq;
	}
	ppd->sdma_head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
		ppd->sdma_descq_phys);
	ppd->sdma_descq = NULL;
	ppd->sdma_descq_phys = 0;
bail:
	ppd->sdma_descq_cnt = 0;
	return -ENOMEM;
}

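/* Free the coherent DMA memory allocated by alloc_sdma(). */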
static void free_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	if (ppd->sdma_head_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *)ppd->sdma_head_dma,
				  ppd->sdma_head_phys);
		ppd->sdma_head_dma = NULL;
		ppd->sdma_head_phys = 0;
	}

	if (ppd->sdma_descq) {
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt * sizeof(u64[2]),
				  ppd->sdma_descq, ppd->sdma_descq_phys);
		ppd->sdma_descq = NULL;
		ppd->sdma_descq_phys = 0;
	}
}

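/*
 * Build the two 64-bit qwords of a send DMA descriptor for a buffer at
 * bus address "addr", "dwlen" dwords long, at dword offset "dwoffset"
 * in the send buffer.  The caller ORs in any SDMA_DESC_* control bits
 * it needs and copies the result into the descriptor queue.
 */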
static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{

	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}

/* sdma_lock must be held */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if desc is part of this txp, unmap if needed */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* increment dequeued desc count */
		ppd->sdma_descq_removed++;

		/* advance head, wrap if needed */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from active list */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
					list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}

/*
 * This is called from interrupt context.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

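/*
 * Interrupt handling work; caller must already hold sdma_lock.  Retire
 * completed descriptors and, if user sdma requests are queued on
 * sdma_userpending, hand more of them to the hardware.
 */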
void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd)) {
		qib_sdma_make_progress(ppd);
		if (!list_empty(&ppd->sdma_userpending))
			qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
	}
}

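/*
 * Per-port send DMA initialization: allocate the descriptor queue and
 * head page, reset the software state, set up the clean up tasklet and
 * chip registers, then kick the state machine with e10_go_hw_start.
 */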
int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* set consistent sdma state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	ppd->sdma_intrequest = 0;
	INIT_LIST_HEAD(&ppd->sdma_userpending);

	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_setup(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

	/*
	 * This waits for the state machine to exit so it is not
	 * necessary to kill the sdma_sw_clean_up_task to make sure
	 * it is not running.
	 */
	sdma_finalput(&ppd->sdma_state);

	free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/*
 * Complete a request when sdma is not running; it is likely the only
 * request, but to simplify the code, always queue it, then process the
 * full activelist.  We process the entire list to ensure that this
 * particular request does get its callback, but in the correct order.
 * Must be called with sdma_lock held.
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{
	struct qib_qp_priv *priv = tx->qp->priv;

	atomic_inc(&priv->s_dma_busy);
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
			struct rvt_sge_state *ss, u32 dwords,
			struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct rvt_sge *sge;
	struct rvt_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;
	struct qib_qp_priv *priv;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
					ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

	/* write to the descq */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	/* increment the tail */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
		++ppd->sdma_generation;
	}

	tx->txreq.start_idx = tail;

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len = rvt_get_sge_length(sge, dwords << 2);

		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
				      dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
			ret = -ENOMEM;
			goto unmap;
		}
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		rvt_update_sge(ss, len, false);
		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
	priv = tx->qp->priv;
	atomic_inc(&priv->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	priv = qp->priv;
	qib_put_txreq(tx);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		/* XXX what about error sending RDMA read responses? */
		if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
			rvt_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	/* return zero to process the next send work request */
	goto unlock;

busy:
	qp = tx->qp;
	priv = qp->priv;
	spin_lock(&qp->s_lock);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		priv->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			struct qib_ibport *ibp;

			ibp = &ppd->ibport_data;
			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}

/*
 * sdma_lock should be acquired before calling this routine
 */
void dump_sdma_state(struct qib_pportdata *ppd)
{
	struct qib_sdma_desc *descq;
	struct qib_sdma_txreq *txp, *txpnext;
	__le64 *descqp;
	u64 desc[2];
	u64 addr;
	u16 gen, dwlen, dwoffset;
	u16 head, tail, cnt;

	head = ppd->sdma_descq_head;
	tail = ppd->sdma_descq_tail;
	cnt = qib_sdma_descq_freecnt(ppd);
	descq = ppd->sdma_descq;

	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_head: %u\n", head);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_tail: %u\n", tail);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA sdma_descq_freecnt: %u\n", cnt);

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };

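		/*
		 * Flag characters decode the control bits defined above:
		 * 'I' intr req, 'L'/'S' large/small buf, 'H' dma head,
		 * 'F' first, trailing 'L' last; '-' means not set.
		 */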
		descqp = &descq[head].qw[0];
		desc[0] = le64_to_cpu(descqp[0]);
		desc[1] = le64_to_cpu(descqp[1]);
		flags[0] = (desc[0] & 1<<15) ? 'I' : '-';
		flags[1] = (desc[0] & 1<<14) ? 'L' : 'S';
		flags[2] = (desc[0] & 1<<13) ? 'H' : '-';
		flags[3] = (desc[0] & 1<<12) ? 'F' : '-';
		flags[4] = (desc[0] & 1<<11) ? 'L' : '-';
		addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
		gen = (desc[0] >> 30) & 3ULL;
		dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
		dwoffset = (desc[0] & 0x7ffULL) << 2;
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
			 head, flags, addr, gen, dwlen, dwoffset);
		if (++head == ppd->sdma_descq_cnt)
			head = 0;
	}

	/* print dma descriptor indices from the TX requests */
	list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
				 list)
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
			txp->start_idx, txp->next_descq_idx);
}

void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

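/*
 * Core of the send DMA state machine: dispatch "event" against the
 * current state, moving between states with sdma_set_state() and
 * scheduling hardware/software clean up as needed.  Caller must hold
 * sdma_lock (see qib_sdma_process_event() above).
 */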
void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually the result
			 * of a link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on
			 * the 7220, e.g.
			 */
			ss->go_s99_running = 1;
			fallthrough;	/* and start dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			sdma_set_state(ppd, ss->go_s99_running ?
				       qib_sdma_state_s99_running :
				       qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	ss->last_event = event;
}