/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H

#include <soc/fsl/dpaa2-fd.h>

#define QMAN_REV_4000   0x04000000
#define QMAN_REV_4100   0x04010000
#define QMAN_REV_4101   0x04010001
#define QMAN_REV_5000   0x05000000

#define QMAN_REV_MASK   0xffff0000

struct dpaa2_dq;
struct qbman_swp;

/* qbman software portal descriptor structure */
struct qbman_swp_desc {
	void *cena_bar; /* Cache-enabled portal base address */
	void __iomem *cinh_bar; /* Cache-inhibited portal base address */
	u32 qman_version;
};

#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20

/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
	u8 verb;
	u8 numf;
	u8 tok;
	u8 reserved;
	__le32 dq_src;
	__le64 rsp_addr;
	u64 rsp_addr_virt;
	u8 padding[40];
};

enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};

/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK      0x7f
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE	0x48
#define QBMAN_FQ_FORCE		0x49
#define QBMAN_FQ_XON		0x4d
#define QBMAN_FQ_XOFF		0x4e

/* structure of enqueue descriptor */
struct qbman_eq_desc {
	u8 verb;
	u8 dca;
	__le16 seqnum;
	__le16 orpid;
	__le16 reserved1;
	__le32 tgtid;
	__le32 tag;
	__le16 qdbin;
	u8 qpri;
	u8 reserved[3];
	u8 wae;
	u8 rspid;
	__le64 rsp_addr;
};

struct qbman_eq_desc_with_fd {
	struct qbman_eq_desc desc;
	u8 fd[32];
};

/* buffer release descriptor */
struct qbman_release_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	__le32 reserved2;
	__le64 buf[7];
};

/* Management command result codes */
#define QBMAN_MC_RSLT_OK      0xf0

#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

/* portal data structure */
struct qbman_swp {
	const struct qbman_swp_desc *desc;
	void *addr_cena;
	void __iomem *addr_cinh;

	/* Management commands */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mc;

	/* Management response */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mr;

	/* Push dequeues */
	u32 sdq;

	/* Volatile dequeues */
	struct {
		atomic_t available; /* indicates if a command can be sent */
		u32 valid_bit; /* 0x00 or 0x80 */
		struct dpaa2_dq *storage; /* NULL if DQRR */
	} vdq;

	/* DQRR */
	struct {
		u32 next_idx;
		u32 valid_bit;
		u8 dqrr_size;
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;

	struct {
		u32 pi;
		u32 pi_vb;
		u32 pi_ring_size;
		u32 pi_ci_mask;
		u32 ci;
		int available;
		u32 pend;
		u32 no_pfdr;
	} eqcr;

	spinlock_t access_spinlock;
};

/* Function pointers */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers);

/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);

void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);

void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct);
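
/*
 * Example (illustrative sketch only, not part of the API): a typical
 * volatile (pull) dequeue setup against a frame queue. "swp", "my_storage",
 * "my_storage_phys" and "my_fqid" are hypothetical caller-supplied values,
 * and the burst size of 8 is an arbitrary choice. The actual pull is issued
 * with qbman_swp_pull(), defined later in this header.
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, my_storage, my_storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 8);
 *	qbman_pull_desc_set_fq(&pd, my_fqid);
 *	if (qbman_swp_pull(swp, &pd))
 *		;	(portal busy, retry later)
 */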

void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);

void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio);

void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx);

void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);

/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		  const struct dpaa2_fd *fd)
{
	return qbman_swp_enqueue_ptr(s, d, fd);
}
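
/*
 * Example (illustrative sketch only): enqueuing a single frame to a frame
 * queue, using the descriptor setters declared above. "swp", "my_fqid" and
 * "fd" are hypothetical caller-supplied values; a real caller would bound
 * or back off the retry loop rather than spin indefinitely.
 *
 *	struct qbman_eq_desc ed;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);
 *	qbman_eq_desc_set_fq(&ed, my_fqid);
 *	while (qbman_swp_enqueue(swp, &ed, fd) == -EBUSY)
 *		;	(EQCR full, retry)
 */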

/**
 * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of the frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of frame descriptors to be enqueued
 *
 * Return the number of frame descriptors enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
			   const struct qbman_eq_desc *d,
			   const struct dpaa2_fd *fd,
			   uint32_t *flags,
			   int num_frames)
{
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}

/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: table pointer of the frame descriptor table to be enqueued
 * @num_frames: number of frame descriptors to be enqueued
 *
 * Return the number of frame descriptors enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				const struct qbman_eq_desc *d,
				const struct dpaa2_fd *fd,
				int num_frames)
{
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}

/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}

/**
 * qbman_result_is_SCN() - Check whether the dequeue result is a notification
 * @dq: the dequeue result to be checked
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return !qbman_result_is_DQ(dq);
}

/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}

/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}

/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}

/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}

/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}

/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}

/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}

/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}

/**
 * qbman_result_SCN_state() - Get the state field in a State-change notification
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
	return scn->scn.state;
}

#define SCN_RID_MASK 0x00FFFFFF

/**
 * qbman_result_SCN_rid() - Get the resource id in a State-change notification
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}

/**
 * qbman_result_SCN_ctx() - Get the context data in a State-change notification
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
	return le64_to_cpu(scn->scn.ctx);
}

/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s:    the software portal object
 * @fqid: the index of the frame queue to be scheduled
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this schedules it.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s:    the software portal object
 * @fqid: the index of the frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s:    the software portal object
 * @fqid: the index of the frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s:    the software portal object
 * @fqid: the index of the frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware.
 */

/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s:         the software portal object
 * @channelid: the channel index
 * @ctx:       the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
					     u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx:       the context set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
						    u16 channelid,
						    u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
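
/*
 * Example (illustrative sketch only): the CDAN reenable flow described
 * above, as it might appear in a channel's notification handler. "swp",
 * "pd", "my_chid" and "my_ctx" are hypothetical caller-supplied values,
 * and the pull descriptor's storage/numframes setup is elided.
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_channel(&pd, my_chid, qbman_pull_type_prio);
 *	qbman_swp_pull(swp, &pd);
 *	(consume the resulting dequeue entries, then rearm the channel:)
 *	qbman_swp_CDAN_set_context_enable(swp, my_chid, my_ctx);
 */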

/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
					  u8 cmd_verb)
{
	int loopvar = 2000;

	qbman_swp_mc_submit(swp, cmd, cmd_verb);

	do {
		cmd = qbman_swp_mc_result(swp);
	} while (!cmd && loopvar--);

	WARN_ON(!loopvar);

	return cmd;
}
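
/*
 * Example (illustrative sketch only): the management-command idiom that
 * qbman_swp_mc_complete() wraps. The command body written between mc_start
 * and mc_complete is hardware-specific and elided; "MY_CMD_VERB" is a
 * hypothetical verb code, and the NULL check assumes mc_start returns NULL
 * when the portal cannot accept a command.
 *
 *	void *cmd, *rslt;
 *
 *	cmd = qbman_swp_mc_start(p);
 *	if (!cmd)
 *		return -EBUSY;
 *	(fill in the command body here)
 *	rslt = qbman_swp_mc_complete(p, cmd, MY_CMD_VERB);
 */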

/* Query APIs */
struct qbman_fq_query_np_rslt {
	u8 verb;
	u8 rslt;
	u8 st1;
	u8 st2;
	u8 reserved[2];
	__le16 od1_sfdr;
	__le16 od2_sfdr;
	__le16 od3_sfdr;
	__le16 ra1_sfdr;
	__le16 ra2_sfdr;
	__le32 pfdr_hptr;
	__le32 pfdr_tptr;
	__le32 frm_cnt;
	__le32 byte_cnt;
	__le16 ics_surp;
	u8 is;
	u8 reserved2[29];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
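
/*
 * Example (illustrative sketch only): reading the current fill level of a
 * frame queue. "my_fqid" is a hypothetical caller-supplied value, and the
 * sketch assumes qbman_fq_query_state() returns 0 on success.
 *
 *	struct qbman_fq_query_np_rslt r;
 *	u32 frames;
 *
 *	if (!qbman_fq_query_state(s, my_fqid, &r))
 *		frames = qbman_fq_state_frame_count(&r);
 */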

struct qbman_bp_query_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[4];
	u8 bdi;
	u8 state;
	__le32 fill;
	__le32 hdotr;
	__le16 swdet;
	__le16 swdxt;
	__le16 hwdet;
	__le16 hwdxt;
	__le16 swset;
	__le16 swsxt;
	__le16 vbpid;
	__le16 icid;
	__le64 bpscn_addr;
	__le64 bpscn_ctx;
	__le16 hw_targ;
	u8 dbe;
	u8 reserved2;
	u8 sdcnt;
	u8 hdcnt;
	u8 sscnt;
	u8 reserved3[9];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);

/**
 * qbman_swp_release() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static inline int qbman_swp_release(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers)
{
	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
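
/*
 * Example (illustrative sketch only): returning buffers to a buffer pool.
 * "my_bpid", "bufs" (an array of buffer addresses) and "n" (at most 7, per
 * the constraint above) are hypothetical caller-supplied values.
 *
 *	struct qbman_release_desc rd;
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, my_bpid);
 *	while (qbman_swp_release(s, &rd, bufs, n) == -EBUSY)
 *		;	(release ring busy, retry)
 */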

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}

/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}
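
/*
 * Example (illustrative sketch only): draining the DQRR after a push or
 * pull dequeue. Each entry is returned once and must be handed back with
 * qbman_swp_dqrr_consume() after processing.
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(s))) {
 *		if (qbman_result_is_DQ(dq))
 *			(process the frame descriptor)
 *		qbman_swp_dqrr_consume(s, dq);
 *	}
 */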

#endif /* __FSL_QBMAN_PORTAL_H */