xref: /OK3568_Linux_fs/u-boot/include/fsl-mc/fsl_qbman_portal.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (C) 2014 Freescale Semiconductor
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * SPDX-License-Identifier:	GPL-2.0+
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #ifndef _FSL_QBMAN_PORTAL_H
8*4882a593Smuzhiyun #define _FSL_QBMAN_PORTAL_H
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <fsl-mc/fsl_qbman_base.h>
11*4882a593Smuzhiyun 
/* Create a functional object representing the given QBMan software portal
 * descriptor. The returned handle is used by all other portal operations in
 * this API. Presumably returns NULL on failure -- confirm against the
 * implementation. NOTE(review): the original comment said "create and
 * destroy", but no destroy/finish function is declared in this header. */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *);
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun 	/************/
17*4882a593Smuzhiyun 	/* Dequeues */
18*4882a593Smuzhiyun 	/************/
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun /* See the QBMan driver API documentation for details on the enqueue
21*4882a593Smuzhiyun  * mechanisms. NB: the use of a 'ldpaa_' prefix for this type is because it is
22*4882a593Smuzhiyun  * primarily used by the "DPIO" layer that sits above (and hides) the QBMan
23*4882a593Smuzhiyun  * driver. The structure is defined in the DPIO interface, but to avoid circular
24*4882a593Smuzhiyun  * dependencies we just pre/re-declare it here opaquely. */
25*4882a593Smuzhiyun struct ldpaa_dq;
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun /* ------------------- */
29*4882a593Smuzhiyun /* Pull-mode dequeuing */
30*4882a593Smuzhiyun /* ------------------- */
31*4882a593Smuzhiyun 
struct qbman_pull_desc {
	/* Opaque, hardware-formatted pull-dequeue command. Populate it only
	 * via the qbman_pull_desc_*() accessors declared below. */
	uint32_t dont_manipulate_directly[6];
};
35*4882a593Smuzhiyun 
/* Reset a pull-dequeue descriptor to its default/starting state. Call this
 * before any of the qbman_pull_desc_set_*() functions below. */
void qbman_pull_desc_clear(struct qbman_pull_desc *);
/* If not called, or if called with 'storage' as NULL, the resulting pull
 * dequeues will produce results to DQRR. If 'storage' is non-NULL, then
 * results are produced to the given memory location (using the physical/DMA
 * address which the caller provides in 'storage_phys'), and 'stash' controls
 * whether or not those writes to main-memory express a cache-warming
 * attribute. NB: 'storage' (virtual) and 'storage_phys' (bus/DMA) must refer
 * to the same underlying memory. */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *,
				 struct ldpaa_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
/* numframes must be between 1 and 16, inclusive */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes);
/* 'token' is the value that shows up in the dequeue results, and can be used
 * to detect when the results have been published; it is not really used when
 * dequeue results go to DQRR. The easiest technique is to zero the result
 * "storage" before issuing a pull dequeue, and use any non-zero 'token' value
 * (see qbman_dq_entry_set_oldtoken()/qbman_dq_entry_has_newtoken()). */
void qbman_pull_desc_set_token(struct qbman_pull_desc *, uint8_t token);
/* Set the target of the pull dequeue: the given frame queue (FQ). Calling it
 * again replaces the effect of any prior call.
 * NOTE(review): the original comment also described dequeuing from any FQ in
 * a work queue (WQ) or channel, but only the FQ variant is declared in this
 * header. */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid);

/* Issue the pull dequeue command described by the (fully populated)
 * descriptor. Presumably returns non-zero if the command could not be issued
 * (e.g. a previous pull is still outstanding) -- confirm against the
 * implementation. */
int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *);
64*4882a593Smuzhiyun 
/* -------------------------------- */
/* Polling DQRR for dequeue results */
/* -------------------------------- */

/* Returns NULL if there are no unconsumed DQRR entries. Returns a given DQRR
 * entry only once, so repeated calls can return a sequence of DQRR entries,
 * without requiring that they be consumed immediately or in any particular
 * order. */
const struct ldpaa_dq *qbman_swp_dqrr_next(struct qbman_swp *);
/* Consume a DQRR entry previously returned from qbman_swp_dqrr_next(); each
 * returned entry should eventually be consumed exactly once so the ring slot
 * can be reused by hardware. */
void qbman_swp_dqrr_consume(struct qbman_swp *, const struct ldpaa_dq *);
75*4882a593Smuzhiyun 
/* ------------------------------------------------- */
/* Polling user-provided storage for dequeue results */
/* ------------------------------------------------- */

/* Only used for user-provided storage of dequeue results, not DQRR. Prior to
 * being used, the storage must be set to "oldtoken", so that the driver
 * notices when hardware has filled it in with results using a "newtoken". NB,
 * for efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in
 * host-endian format (whether or not that is the same as the little-endian
 * format that hardware DMA'd to the user's storage). As such, once the user
 * has called qbman_dq_entry_has_newtoken() and been returned a valid dequeue
 * result, they should not call it again on the same memory location (except
 * of course if another dequeue command has been executed to produce a new
 * result to that location).
 */
/* Write 'oldtoken' into each of the 'num_entries' result entries starting at
 * the given storage, in preparation for a pull dequeue into that storage. */
void qbman_dq_entry_set_oldtoken(struct ldpaa_dq *,
				 unsigned int num_entries,
				 uint8_t oldtoken);
/* Returns non-zero once hardware has published a result carrying 'newtoken'
 * to the given entry (converting the entry to host-endian as described
 * above). */
int qbman_dq_entry_has_newtoken(struct qbman_swp *,
				const struct ldpaa_dq *,
				uint8_t newtoken);
98*4882a593Smuzhiyun 
/* -------------------------------------------------------- */
/* Parsing dequeue entries (DQRR and user-provided storage) */
/* -------------------------------------------------------- */

/* DQRR entries may contain non-dequeue results, ie. notifications. Returns
 * non-zero if the entry is a genuine dequeue (frame delivery) result. */
int qbman_dq_entry_is_DQ(const struct ldpaa_dq *);
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	/************/
107*4882a593Smuzhiyun 	/* Enqueues */
108*4882a593Smuzhiyun 	/************/
109*4882a593Smuzhiyun 
struct qbman_eq_desc {
	/* Opaque, hardware-formatted enqueue command. Populate it only via
	 * the qbman_eq_desc_*() accessors declared below. */
	uint32_t dont_manipulate_directly[8];
};
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 
/* Reset an enqueue descriptor to its default/starting state. Call this before
 * any of the qbman_eq_desc_set_*() functions below. */
void qbman_eq_desc_clear(struct qbman_eq_desc *);
/* Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue without order-restoration
 * - enqueue with order-restoration
 * - fill a hole in the order-restoration sequence, without any enqueue
 * - advance NESN (Next Expected Sequence Number), without any enqueue
 * 'respond_success' indicates whether an enqueue response should be DMA'd
 * after success (otherwise a response is DMA'd only after failure).
 * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
 * be enqueued.
 * NOTE(review): only the no-ORP variant is declared in this header; the
 * order-restoration/hole/NESN variants described above are not present.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *, int respond_success);
/* Provide the memory location (physical/DMA address 'storage_phys') to which
 * the enqueue response should be DMA'd; 'stash' controls whether that write
 * to main-memory expresses a cache-warming attribute. */
void qbman_eq_desc_set_response(struct qbman_eq_desc *,
				dma_addr_t storage_phys,
				int stash);
/* 'token' is the value that shows up in an enqueue response, and can be used
 * to detect when the results have been published. The easiest technique is to
 * zero the result "storage" before issuing an enqueue, and use any non-zero
 * 'token' value. */
void qbman_eq_desc_set_token(struct qbman_eq_desc *, uint8_t token);
/* Exactly one of the following descriptor "targets" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 * Note that none of these will have any effect if the "action" type has been
 * set to "orp_hole" or "orp_nesn".
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid,
			  uint32_t qd_bin, uint32_t qd_prio);

/* Issue an enqueue command. ('fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".) Presumably returns non-zero on
 * failure (e.g. enqueue ring busy) -- confirm against the implementation. */
int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *,
		      const struct qbman_fd *fd);
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	/*******************/
154*4882a593Smuzhiyun 	/* Buffer releases */
155*4882a593Smuzhiyun 	/*******************/
156*4882a593Smuzhiyun 
struct qbman_release_desc {
	/* Opaque, hardware-formatted buffer-release command. Populate it only
	 * via the qbman_release_desc_*() accessors declared below. */
	uint32_t dont_manipulate_directly[1];
};
160*4882a593Smuzhiyun 
/* Reset a buffer-release descriptor to its default/starting state. Call this
 * before qbman_release_desc_set_bpid(). */
void qbman_release_desc_clear(struct qbman_release_desc *);
/* Set the ID of the buffer pool to release to */
void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid);
/* Issue a release command, returning the 'num_buffers' buffer addresses in
 * 'buffers' to the pool identified by the descriptor. 'num_buffers' must be
 * less than 8 (i.e. 1..7). Presumably returns non-zero on failure -- confirm
 * against the implementation. */
int qbman_swp_release(struct qbman_swp *, const struct qbman_release_desc *,
		      const uint64_t *buffers, unsigned int num_buffers);
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	/*******************/
170*4882a593Smuzhiyun 	/* Buffer acquires */
171*4882a593Smuzhiyun 	/*******************/
172*4882a593Smuzhiyun 
/* Acquire (allocate) up to 'num_buffers' buffers from buffer pool 'bpid',
 * writing their addresses into 'buffers'. Presumably returns the number of
 * buffers actually acquired (which may be fewer than requested) or a negative
 * error code, and num_buffers is limited to 1..7 as for release -- TODO
 * confirm both against the implementation. */
int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers,
		      unsigned int num_buffers);
175*4882a593Smuzhiyun #endif /* !_FSL_QBMAN_PORTAL_H */
176