/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H

#include <soc/fsl/dpaa2-fd.h>

#define QMAN_REV_4000   0x04000000
#define QMAN_REV_4100   0x04010000
#define QMAN_REV_4101   0x04010001
#define QMAN_REV_5000   0x05000000

#define QMAN_REV_MASK   0xffff0000

struct dpaa2_dq;
struct qbman_swp;

/* qbman software portal descriptor structure */
struct qbman_swp_desc {
	void *cena_bar; /* Cache-enabled portal base address */
	void __iomem *cinh_bar; /* Cache-inhibited portal base address */
	u32 qman_version;
	u32 qman_clk; /* QBMan clock frequency, in Hz */
	u32 qman_256_cycles_per_ns; /* duration of 256 QBMan clock cycles, in ns */
};

#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20

/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
	u8 verb;
	u8 numf;
	u8 tok;
	u8 reserved;
	__le32 dq_src;
	__le64 rsp_addr;
	u64 rsp_addr_virt;
	u8 padding[40];
};

enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};

/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK      0x7f
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE	0x48
#define QBMAN_FQ_FORCE		0x49
#define QBMAN_FQ_XON		0x4d
#define QBMAN_FQ_XOFF		0x4e

/* structure of enqueue descriptor */
struct qbman_eq_desc {
	u8 verb;
	u8 dca;
	__le16 seqnum;
	__le16 orpid;
	__le16 reserved1;
	__le32 tgtid;
	__le32 tag;
	__le16 qdbin;
	u8 qpri;
	u8 reserved[3];
	u8 wae;
	u8 rspid;
	__le64 rsp_addr;
};

struct qbman_eq_desc_with_fd {
	struct qbman_eq_desc desc;
	u8 fd[32];
};

/* buffer release descriptor */
struct qbman_release_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	__le32 reserved2;
	__le64 buf[7];
};

/* Management command result codes */
#define QBMAN_MC_RSLT_OK      0xf0

#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

/* portal data structure */
struct qbman_swp {
	const struct qbman_swp_desc *desc;
	void *addr_cena;
	void __iomem *addr_cinh;

	/* Management commands */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mc;

	/* Management response */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mr;

	/* Push dequeues */
	u32 sdq;

	/* Volatile dequeues */
	struct {
		atomic_t available; /* indicates if a command can be sent */
		u32 valid_bit; /* 0x00 or 0x80 */
		struct dpaa2_dq *storage; /* NULL if DQRR */
	} vdq;

	/* DQRR */
	struct {
		u32 next_idx;
		u32 valid_bit;
		u8 dqrr_size;
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;

	/* EQCR state */
	struct {
		u32 pi; /* producer index */
		u32 pi_vb; /* valid bit of the producer index */
		u32 pi_ring_size;
		u32 pi_ci_mask;
		u32 ci; /* consumer index */
		int available;
		u32 pend;
		u32 no_pfdr;
	} eqcr;

	spinlock_t access_spinlock;

	/* Interrupt coalescing */
	u32 irq_threshold;
	u32 irq_holdoff;
	int use_adaptive_rx_coalesce;
};

/* Function pointers */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers);

/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);

void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
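
/*
 * Example (illustrative sketch, not taken from a driver): enabling
 * push-mode dequeuing from the portal's channel index 0.  "swp" stands
 * in for a portal obtained from qbman_swp_init().
 *
 *	qbman_swp_push_set(swp, 0, 1);
 */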

void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct);
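
/*
 * Example (illustrative sketch): preparing a volatile dequeue of up to
 * 16 frames from a frame queue into DMA-mapped storage.  my_fqid,
 * storage_virt and storage_phys are placeholders for values the caller
 * already owns.
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage_virt, storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 16);
 *	qbman_pull_desc_set_fq(&pd, my_fqid);
 */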

void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);

void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio);

void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx);

void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);

/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		  const struct dpaa2_fd *fd)
{
	return qbman_swp_enqueue_ptr(s, d, fd);
}
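
/*
 * Example (illustrative sketch): a single-frame enqueue to a frame
 * queue.  The EQCR may be momentarily full, in which case the wrapper
 * returns -EBUSY and callers typically retry; my_fqid and fd are
 * placeholders.
 *
 *	struct qbman_eq_desc ed;
 *	int err;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);
 *	qbman_eq_desc_set_fq(&ed, my_fqid);
 *	do {
 *		err = qbman_swp_enqueue(swp, &ed, fd);
 *	} while (err == -EBUSY);
 */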

/**
 * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @flags: table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
			   const struct qbman_eq_desc *d,
			   const struct dpaa2_fd *fd,
			   uint32_t *flags,
			   int num_frames)
{
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
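
/*
 * Example (illustrative sketch): bursting a table of frames with one
 * descriptor.  Fewer than num_frames may be accepted when the EQCR
 * fills, so the caller advances by the return value; ed (prepared as
 * in the previous example), fds[] and n are placeholders, and a real
 * caller would bound the retries.
 *
 *	int done = 0;
 *
 *	while (done < n) {
 *		int ret = qbman_swp_enqueue_multiple(swp, &ed, &fds[done],
 *						     NULL, n - done);
 *		if (ret < 0)
 *			break;
 *		done += ret;
 *	}
 */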

/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				const struct qbman_eq_desc *d,
				const struct dpaa2_fd *fd,
				int num_frames)
{
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}

/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}

/**
 * qbman_result_is_SCN() - Check whether the dequeue result is a notification
 * @dq: the dequeue result to be checked
 *
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return !qbman_result_is_DQ(dq);
}

/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}

/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}

/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}

/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}

/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}

/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}

/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}

/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}
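
/*
 * Example (illustrative sketch): classifying an entry returned by
 * qbman_swp_dqrr_next().  handle_frame() and handle_notification()
 * are hypothetical consumers supplied by the caller.
 *
 *	if (qbman_result_is_DQ(dq))
 *		handle_frame(dq);
 *	else if (qbman_result_is_CDAN(dq))
 *		handle_notification(dq);
 */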

/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
	return scn->scn.state;
}

#define SCN_RID_MASK 0x00FFFFFF

/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}

/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
	return le64_to_cpu(scn->scn.ctx);
}

/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be scheduled
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this command schedules it.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware.
 */
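
/*
 * Example (illustrative sketch) of that flow: on receiving a CDAN for
 * my_chid, pull-dequeue the channel, consume the results, then re-arm.
 * my_chid, storage_virt and storage_phys are placeholders.
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_channel(&pd, my_chid, qbman_pull_type_prio);
 *	qbman_pull_desc_set_storage(&pd, storage_virt, storage_phys, 1);
 *	qbman_swp_pull(swp, &pd);
 *	... consume the dequeue results ...
 *	qbman_swp_CDAN_enable(swp, my_chid);
 */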

/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s:         the software portal object
 * @channelid: the channel index
 * @ctx:       the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
					     u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx:       the context set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
						    u16 channelid,
						    u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}

/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
					  u8 cmd_verb)
{
	int loopvar = 2000;

	qbman_swp_mc_submit(swp, cmd, cmd_verb);

	do {
		cmd = qbman_swp_mc_result(swp);
	} while (!cmd && loopvar--);

	WARN_ON(!loopvar);

	return cmd;
}

/* Query APIs */
struct qbman_fq_query_np_rslt {
	u8 verb;
	u8 rslt;
	u8 st1;
	u8 st2;
	u8 reserved[2];
	__le16 od1_sfdr;
	__le16 od2_sfdr;
	__le16 od3_sfdr;
	__le16 ra1_sfdr;
	__le16 ra2_sfdr;
	__le32 pfdr_hptr;
	__le32 pfdr_tptr;
	__le32 frm_cnt;
	__le32 byte_cnt;
	__le16 ics_surp;
	u8 is;
	u8 reserved2[29];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
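
/*
 * Example (illustrative sketch): reading the current backlog of frame
 * queue my_fqid (a placeholder).
 *
 *	struct qbman_fq_query_np_rslt state;
 *
 *	if (!qbman_fq_query_state(swp, my_fqid, &state))
 *		pr_info("fq %u: %u frames, %u bytes\n", my_fqid,
 *			qbman_fq_state_frame_count(&state),
 *			qbman_fq_state_byte_count(&state));
 */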

struct qbman_bp_query_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[4];
	u8 bdi;
	u8 state;
	__le32 fill;
	__le32 hdotr;
	__le16 swdet;
	__le16 swdxt;
	__le16 hwdet;
	__le16 hwdxt;
	__le16 swset;
	__le16 swsxt;
	__le16 vbpid;
	__le16 icid;
	__le64 bpscn_addr;
	__le64 bpscn_ctx;
	__le16 hw_targ;
	u8 dbe;
	u8 reserved2;
	u8 sdcnt;
	u8 hdcnt;
	u8 sscnt;
	u8 reserved3[9];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
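
/*
 * Example (illustrative sketch): checking how many free buffers remain
 * in buffer pool my_bpid (a placeholder).
 *
 *	struct qbman_bp_query_rslt bp;
 *	u32 num_free;
 *
 *	if (!qbman_bp_query(swp, my_bpid, &bp))
 *		num_free = qbman_bp_info_num_free_bufs(&bp);
 */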

/**
 * qbman_swp_release() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static inline int qbman_swp_release(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers)
{
	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
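
/*
 * Example (illustrative sketch): returning seven buffers to pool
 * my_bpid.  Like enqueue, release reports -EBUSY when its command ring
 * is full, so retrying is typical; my_bpid and buf_array (the buffer
 * addresses) are placeholders.
 *
 *	struct qbman_release_desc rd;
 *	int err;
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, my_bpid);
 *	do {
 *		err = qbman_swp_release(swp, &rd, buf_array, 7);
 *	} while (err == -EBUSY);
 */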

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}

/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring that they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}
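
/*
 * Example (illustrative sketch): draining the DQRR.  Every entry
 * returned here must eventually be handed back with
 * qbman_swp_dqrr_consume(); process_entry() is a hypothetical consumer.
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp))) {
 *		process_entry(dq);
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */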

int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
				 u32 irq_holdoff);

void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
				  u32 *irq_holdoff);
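
/*
 * Example (illustrative sketch): ask for an interrupt only once 8 DQRR
 * entries have accumulated or the holdoff expires, whichever comes
 * first.  The accepted ranges and the holdoff units are defined by the
 * implementation in qbman-portal.c; irq_holdoff is a placeholder.
 *
 *	qbman_swp_set_irq_coalescing(swp, 8, irq_holdoff);
 */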

#endif /* __FSL_QBMAN_PORTAL_H */