/*-
 * Copyright (c) 2011-2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/tlb.h>

#include "qman.h"
#include "portals.h"

extern struct dpaa_portals_softc *qp_sc;
static struct qman_softc *qman_sc;

extern t_Handle qman_portal_setup(struct qman_softc *qsc);

static void
qman_exception(t_Handle app, e_QmExceptions exception)
{
	struct qman_softc *sc;
	const char *message;

	sc = app;

	switch (exception) {
	case e_QM_EX_CORENET_INITIATOR_DATA:
		message = "Initiator Data Error";
		break;
	case e_QM_EX_CORENET_TARGET_DATA:
		message = "CoreNet Target Data Error";
		break;
	case e_QM_EX_CORENET_INVALID_TARGET_TRANSACTION:
		message = "Invalid Target Transaction";
		break;
	case e_QM_EX_PFDR_THRESHOLD:
		message = "PFDR Low Watermark Interrupt";
		break;
	case e_QM_EX_PFDR_ENQUEUE_BLOCKED:
		message = "PFDR Enqueues Blocked Interrupt";
		break;
	case e_QM_EX_SINGLE_ECC:
		message = "Single Bit ECC Error Interrupt";
		break;
	case e_QM_EX_MULTI_ECC:
		message = "Multi Bit ECC Error Interrupt";
		break;
	case e_QM_EX_INVALID_COMMAND:
		message = "Invalid Command Verb Interrupt";
		break;
	case e_QM_EX_DEQUEUE_DCP:
		message = "Invalid Dequeue Direct Connect Portal Interrupt";
		break;
	case e_QM_EX_DEQUEUE_FQ:
		message = "Invalid Dequeue FQ Interrupt";
		break;
	case e_QM_EX_DEQUEUE_SOURCE:
		message = "Invalid Dequeue Source Interrupt";
		break;
	case e_QM_EX_DEQUEUE_QUEUE:
		message = "Invalid Dequeue Queue Interrupt";
		break;
	case e_QM_EX_ENQUEUE_OVERFLOW:
		message = "Invalid Enqueue Overflow Interrupt";
		break;
	case e_QM_EX_ENQUEUE_STATE:
		message = "Invalid Enqueue State Interrupt";
		break;
	case e_QM_EX_ENQUEUE_CHANNEL:
		message = "Invalid Enqueue Channel Interrupt";
		break;
	case e_QM_EX_ENQUEUE_QUEUE:
		message = "Invalid Enqueue Queue Interrupt";
		break;
	case e_QM_EX_CG_STATE_CHANGE:
		message = "CG change state notification";
		break;
	default:
		message = "Unknown error";
	}

	device_printf(sc->sc_dev, "QMan Exception: %s.\n", message);
}

/**
 * General received frame callback.
 * Called when the user has not registered their own callback for a given
 * frame queue range (FQR).
 */
e_RxStoreResponse
qman_received_frame_callback(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame)
{
	struct qman_softc *sc;

	sc = app;

	device_printf(sc->sc_dev, "dummy callback for received frame.\n");
	return (e_RX_STORE_RESPONSE_CONTINUE);
}
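
/*
 * Illustrative sketch only (not part of this driver): a consumer that wants
 * its own frame processing provides a callback with the same signature as
 * qman_received_frame_callback() above and registers it through
 * qman_fqr_register_cb().  The names my_softc and my_rx_frame are
 * hypothetical.
 *
 *	static e_RxStoreResponse
 *	my_rx_frame(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
 *	    uint32_t fqid_offset, t_DpaaFD *frame)
 *	{
 *		struct my_softc *sc;
 *
 *		sc = app;
 *		(consume the frame descriptor pointed to by 'frame' here)
 *		return (e_RX_STORE_RESPONSE_CONTINUE);
 *	}
 */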

/**
 * General rejected frame callback.
 * Called when the user has not registered their own callback for a given
 * frame queue range (FQR).
 */
e_RxStoreResponse
qman_rejected_frame_callback(t_Handle app, t_Handle qm_fqr, t_Handle qm_portal,
    uint32_t fqid_offset, t_DpaaFD *frame,
    t_QmRejectedFrameInfo *qm_rejected_frame_info)
{
	struct qman_softc *sc;

	sc = app;

	device_printf(sc->sc_dev, "dummy callback for rejected frame.\n");
	return (e_RX_STORE_RESPONSE_CONTINUE);
}
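
/*
 * Registration sketch (illustrative only): once a frame queue range exists
 * (see qman_fqr_create() below), a consumer replaces the dummy received-frame
 * callback above with its own, e.g.
 *
 *	error = qman_fqr_register_cb(fqrh, my_rx_frame, my_sc);
 *
 * where fqrh, my_rx_frame and my_sc are the hypothetical FQR handle,
 * callback and context from the sketch above.
 */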

int
qman_attach(device_t dev)
{
	struct qman_softc *sc;
	t_QmParam qp;
	t_Error error;
	t_QmRevisionInfo rev;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	qman_sc = sc;

	if (XX_MallocSmartInit() != E_OK) {
		device_printf(dev, "could not initialize smart allocator.\n");
		return (ENXIO);
	}

	/* Stay on one CPU while the per-CPU portal registers are mapped. */
	sched_pin();

	/* Allocate resources */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &sc->sc_rrid, 0, ~0, QMAN_CCSR_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate memory.\n");
		goto err;
	}

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_ires == NULL) {
		device_printf(dev, "could not allocate error interrupt.\n");
		goto err;
	}

	if (qp_sc == NULL)
		goto err;

	dpaa_portal_map_registers(qp_sc);

	/* Initialize QMan */
	qp.guestId = NCSW_MASTER_ID;
	qp.baseAddress = rman_get_bushandle(sc->sc_rres);
	qp.swPortalsBaseAddress = rman_get_bushandle(qp_sc->sc_rres[0]);
	qp.liodn = 0;
	qp.totalNumOfFqids = QMAN_MAX_FQIDS;
	qp.fqdMemPartitionId = NCSW_MASTER_ID;
	qp.pfdrMemPartitionId = NCSW_MASTER_ID;
	qp.f_Exception = qman_exception;
	qp.h_App = sc;
	qp.errIrq = (uintptr_t)sc->sc_ires;
	qp.partFqidBase = QMAN_FQID_BASE;
	qp.partNumOfFqids = QMAN_MAX_FQIDS;
	qp.partCgsBase = 0;
	qp.partNumOfCgs = 0;

	sc->sc_qh = QM_Config(&qp);
	if (sc->sc_qh == NULL) {
		device_printf(dev, "could not configure QMan\n");
		goto err;
	}

	error = QM_Init(sc->sc_qh);
	if (error != E_OK) {
		device_printf(dev, "could not initialize QMan\n");
		goto err;
	}

	error = QM_GetRevision(sc->sc_qh, &rev);
	if (error != E_OK) {
		device_printf(dev, "could not get QMan revision\n");
		goto err;
	}

	device_printf(dev, "Hardware version: %d.%d.\n",
	    rev.majorRev, rev.minorRev);

	sched_unpin();

	qman_portal_setup(sc);

	return (0);

err:
	sched_unpin();
	qman_detach(dev);
	return (ENXIO);
}

int
qman_detach(device_t dev)
{
	struct qman_softc *sc;

	sc = device_get_softc(dev);

	if (sc->sc_qh)
		QM_Free(sc->sc_qh);

	if (sc->sc_ires != NULL)
		XX_DeallocIntr((uintptr_t)sc->sc_ires);

	if (sc->sc_ires != NULL)
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_irid, sc->sc_ires);

	if (sc->sc_rres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    sc->sc_rrid, sc->sc_rres);

	return (0);
}

int
qman_suspend(device_t dev)
{

	return (0);
}

int
qman_resume(device_t dev)
{

	return (0);
}

int
qman_shutdown(device_t dev)
{

	return (0);
}


/**
 * @group QMan API functions implementation.
 * @{
 */
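
/*
 * Typical call sequence for this API (illustrative sketch; the channel, work
 * queue and remaining argument values below are placeholders, and fqrh,
 * my_rx_frame, my_sc and fd are hypothetical):
 *
 *	fqrh = qman_fqr_create(1, channel, wq, FALSE, 1, FALSE, FALSE,
 *	    TRUE, FALSE, NULL, 0, 0);
 *	qman_fqr_register_cb(fqrh, my_rx_frame, my_sc);
 *	qman_fqr_enqueue(fqrh, 0, &fd);
 *	...
 *	qman_fqr_free(fqrh);
 *
 * Each of these calls (except qman_fqr_free(), which binds to the CPU that
 * created the FQR) pins to the current CPU and sets up that CPU's software
 * portal on first use via qman_portal_setup().
 */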

t_Handle
qman_fqr_create(uint32_t fqids_num, e_QmFQChannel channel, uint8_t wq,
    bool force_fqid, uint32_t fqid_or_align, bool init_parked,
    bool hold_active, bool prefer_in_cache, bool congst_avoid_ena,
    t_Handle congst_group, int8_t overhead_accounting_len,
    uint32_t tail_drop_threshold)
{
	struct qman_softc *sc;
	t_QmFqrParams fqr;
	t_Handle fqrh, portal;

	sc = qman_sc;

	sched_pin();

	/* Ensure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not set up QMan portal\n");
		goto err;
	}

	fqr.h_Qm = sc->sc_qh;
	fqr.h_QmPortal = portal;
	fqr.initParked = init_parked;
	fqr.holdActive = hold_active;
	fqr.preferInCache = prefer_in_cache;

	/* We do not support stashing */
	fqr.useContextAForStash = FALSE;
	fqr.p_ContextA = 0;
	fqr.p_ContextB = 0;

	fqr.channel = channel;
	fqr.wq = wq;
	fqr.shadowMode = FALSE;
	fqr.numOfFqids = fqids_num;

	/* FQID */
	fqr.useForce = force_fqid;
	if (force_fqid) {
		fqr.qs.frcQ.fqid = fqid_or_align;
	} else {
		fqr.qs.nonFrcQs.align = fqid_or_align;
	}

	/* Congestion Avoidance */
	fqr.congestionAvoidanceEnable = congst_avoid_ena;
	if (congst_avoid_ena) {
		fqr.congestionAvoidanceParams.h_QmCg = congst_group;
		fqr.congestionAvoidanceParams.overheadAccountingLength =
		    overhead_accounting_len;
		fqr.congestionAvoidanceParams.fqTailDropThreshold =
		    tail_drop_threshold;
	} else {
		fqr.congestionAvoidanceParams.h_QmCg = 0;
		fqr.congestionAvoidanceParams.overheadAccountingLength = 0;
		fqr.congestionAvoidanceParams.fqTailDropThreshold = 0;
	}

	fqrh = QM_FQR_Create(&fqr);
	if (fqrh == NULL) {
		device_printf(sc->sc_dev,
		    "could not create Frame Queue Range\n");
		goto err;
	}

	sc->sc_fqr_cpu[QM_FQR_GetFqid(fqrh)] = PCPU_GET(cpuid);

	sched_unpin();

	return (fqrh);

err:
	sched_unpin();

	return (NULL);
}

t_Error
qman_fqr_free(t_Handle fqr)
{
	struct qman_softc *sc;
	t_Error error;

	sc = qman_sc;

	/* Bind to the CPU on which this FQR was created before freeing it. */
	thread_lock(curthread);
	sched_bind(curthread, sc->sc_fqr_cpu[QM_FQR_GetFqid(fqr)]);
	thread_unlock(curthread);

	error = QM_FQR_Free(fqr);

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}

t_Error
qman_fqr_register_cb(t_Handle fqr, t_QmReceivedFrameCallback *callback,
    t_Handle app)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not set up QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_RegisterCB(fqr, callback, app);

	sched_unpin();

	return (error);
}

t_Error
qman_fqr_enqueue(t_Handle fqr, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not set up QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_Enqueue(fqr, portal, fqid_off, frame);

	sched_unpin();

	return (error);
}

uint32_t
qman_fqr_get_counter(t_Handle fqr, uint32_t fqid_off,
    e_QmFqrCounters counter)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not set up QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetCounter(fqr, portal, fqid_off, counter);

	sched_unpin();

	return (val);
}

t_Error
qman_fqr_pull_frame(t_Handle fqr, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not set up QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_PullFrame(fqr, portal, fqid_off, frame);

	sched_unpin();

	return (error);
}

uint32_t
qman_fqr_get_base_fqid(t_Handle fqr)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not set up QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetFqid(fqr);

	sched_unpin();

	return (val);
}

t_Error
qman_poll(e_QmPortalPollSource source)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal for this CPU is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not set up QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_Poll(sc->sc_qh, source);

	sched_unpin();

	return (error);
}

/*
 * TODO: add polling and/or congestion support.
 */

/** @} */