/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #include "dpaa_sys.h"
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun #include <soc/fsl/qman.h>
34*4882a593Smuzhiyun #include <linux/dma-mapping.h>
35*4882a593Smuzhiyun #include <linux/iommu.h>
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun #if defined(CONFIG_FSL_PAMU)
38*4882a593Smuzhiyun #include <asm/fsl_pamu_stash.h>
39*4882a593Smuzhiyun #endif
40*4882a593Smuzhiyun
/*
 * "Query WQ" management-command result. Hardware-defined layout: do not
 * reorder or resize fields (__packed).
 */
struct qm_mcr_querywq {
	u8 verb;		/* command verb echoed back in the response */
	u8 result;		/* completion/status code */
	u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
	u8 __reserved[28];
	u32 wq_len[8];	/* per-WQ lengths; 8 entries matches the 3-bit WQ field */
} __packed;
48*4882a593Smuzhiyun
qm_mcr_querywq_get_chan(const struct qm_mcr_querywq * wq)49*4882a593Smuzhiyun static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun return wq->channel_wq >> 3;
52*4882a593Smuzhiyun }
53*4882a593Smuzhiyun
/* Raw congestion-group state: 8 x 32-bit words = one bit per group (256) */
struct __qm_mcr_querycongestion {
	u32 state[8];
};
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun /* "Query Congestion Group State" */
/* "Query Congestion Group State" management-command result (__packed) */
struct qm_mcr_querycongestion {
	u8 verb;		/* command verb echoed back in the response */
	u8 result;		/* completion/status code */
	u8 __reserved[30];
	/* Access this struct using qman_cgrs_get() */
	struct __qm_mcr_querycongestion state;
} __packed;
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun /* "Query CGR" */
/*
 * "Query CGR" management-command result (__packed).
 * The 40-bit byte counts are split into an 8-bit high part and a
 * big-endian 32-bit low part; reassemble with the *_get64() helpers below.
 */
struct qm_mcr_querycgr {
	u8 verb;
	u8 result;
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[6];
	u8 i_bcnt_hi;	/* high 8-bits of 40-bit "Instant" */
	__be32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u8 __reserved3[3];
	u8 a_bcnt_hi;	/* high 8-bits of 40-bit "Average" */
	__be32 a_bcnt_lo;	/* low 32-bits of 40-bit */
	__be32 cscn_targ_swp[4];
} __packed;
81*4882a593Smuzhiyun
qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr * q)82*4882a593Smuzhiyun static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
83*4882a593Smuzhiyun {
84*4882a593Smuzhiyun return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
85*4882a593Smuzhiyun }
qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr * q)86*4882a593Smuzhiyun static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun
/* Congestion Groups */

/*
 * This wrapper represents a bit-array for the state of the 256 QMan congestion
 * groups. Is also used as a *mask* for congestion groups, eg. so we ignore
 * those that don't concern us. We harness the structure and accessor details
 * already used in the management command to query congestion groups.
 */
#define CGR_BITS_PER_WORD 5	/* log2(32): shift to index 32-bit words */
#define CGR_WORD(x)	((x) >> CGR_BITS_PER_WORD)	/* word holding bit x */
#define CGR_BIT(x)	(BIT(31) >> ((x) & 0x1f))	/* MSB-first bit within word */
#define CGR_NUM	(sizeof(struct __qm_mcr_querycongestion) << 3)	/* 256 */

struct qman_cgrs {
	struct __qm_mcr_querycongestion q;	/* same layout as the h/w query result */
};
107*4882a593Smuzhiyun
qman_cgrs_init(struct qman_cgrs * c)108*4882a593Smuzhiyun static inline void qman_cgrs_init(struct qman_cgrs *c)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun memset(c, 0, sizeof(*c));
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun
qman_cgrs_fill(struct qman_cgrs * c)113*4882a593Smuzhiyun static inline void qman_cgrs_fill(struct qman_cgrs *c)
114*4882a593Smuzhiyun {
115*4882a593Smuzhiyun memset(c, 0xff, sizeof(*c));
116*4882a593Smuzhiyun }
117*4882a593Smuzhiyun
/* Test the bit for congestion group @cgr; non-zero iff it is set. */
static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
{
	u32 word = c->q.state[CGR_WORD(cgr)];

	return word & CGR_BIT(cgr);
}
122*4882a593Smuzhiyun
qman_cgrs_cp(struct qman_cgrs * dest,const struct qman_cgrs * src)123*4882a593Smuzhiyun static inline void qman_cgrs_cp(struct qman_cgrs *dest,
124*4882a593Smuzhiyun const struct qman_cgrs *src)
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun *dest = *src;
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun
qman_cgrs_and(struct qman_cgrs * dest,const struct qman_cgrs * a,const struct qman_cgrs * b)129*4882a593Smuzhiyun static inline void qman_cgrs_and(struct qman_cgrs *dest,
130*4882a593Smuzhiyun const struct qman_cgrs *a, const struct qman_cgrs *b)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun int ret;
133*4882a593Smuzhiyun u32 *_d = dest->q.state;
134*4882a593Smuzhiyun const u32 *_a = a->q.state;
135*4882a593Smuzhiyun const u32 *_b = b->q.state;
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun for (ret = 0; ret < 8; ret++)
138*4882a593Smuzhiyun *_d++ = *_a++ & *_b++;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun
qman_cgrs_xor(struct qman_cgrs * dest,const struct qman_cgrs * a,const struct qman_cgrs * b)141*4882a593Smuzhiyun static inline void qman_cgrs_xor(struct qman_cgrs *dest,
142*4882a593Smuzhiyun const struct qman_cgrs *a, const struct qman_cgrs *b)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun int ret;
145*4882a593Smuzhiyun u32 *_d = dest->q.state;
146*4882a593Smuzhiyun const u32 *_a = a->q.state;
147*4882a593Smuzhiyun const u32 *_b = b->q.state;
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun for (ret = 0; ret < 8; ret++)
150*4882a593Smuzhiyun *_d++ = *_a++ ^ *_b++;
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun
/* NOTE(review): name suggests this initialises every CGR — confirm in qman.c */
void qman_init_cgr_all(void);

/* Per-portal configuration; one instance per s/w portal */
struct qm_portal_config {
	/* Portal addresses */
	void *addr_virt_ce;		/* cache-enabled region */
	void __iomem *addr_virt_ci;	/* cache-inhibited region */
	struct device *dev;
	struct iommu_domain *iommu_domain;
	/* Allow these to be joined in lists */
	struct list_head list;
	/* User-visible portal configuration settings */
	/* portal is affined to this cpu */
	int cpu;
	/* portal interrupt line */
	int irq;
	/*
	 * the portal's dedicated channel id, used initialising
	 * frame queues to target this portal when scheduled
	 */
	u16 channel;
	/*
	 * mask of pool channels this portal has dequeue access to
	 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
	 */
	u32 pools;
};
179*4882a593Smuzhiyun
/* Revision info (for errata and feature handling) */
#define QMAN_REV11 0x0101
#define QMAN_REV12 0x0102
#define QMAN_REV20 0x0200
#define QMAN_REV30 0x0300
#define QMAN_REV31 0x0301
#define QMAN_REV32 0x0302
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */

#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
extern struct gen_pool *qm_fqalloc; /* FQID allocator */
extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
u32 qm_get_pools_sdqcr(void);

int qman_wq_alloc(void);
#ifdef CONFIG_FSL_PAMU
#define qman_liodn_fixup __qman_liodn_fixup
#else
/* Without PAMU there is no LIODN to patch up: no-op stub */
static inline void qman_liodn_fixup(u16 channel)
{
}
#endif
void __qman_liodn_fixup(u16 channel);
void qman_set_sdest(u16 channel, unsigned int cpu_idx);
205*4882a593Smuzhiyun
/* Create/destroy the portal affined to the current cpu (cgrs may be NULL —
 * TODO confirm against the definition in qman.c) */
struct qman_portal *qman_create_affine_portal(
			const struct qm_portal_config *config,
			const struct qman_cgrs *cgrs);
const struct qm_portal_config *qman_destroy_affine_portal(void);

/*
 * qman_query_fq - Queries FQD fields (via h/w query command)
 * @fq: the frame queue object to be queried
 * @fqd: storage for the queried FQD fields
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);

int qman_alloc_fq_table(u32 num_fqids);
/* QMan s/w corenet portal, low-level i/face */

/*
 * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
 * dequeue TYPE. Choose TOKEN (8-bit).
 * If SOURCE == CHANNELS,
 *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
 *   You can choose DEDICATED_PRECEDENCE if the portal channel should have
 *   priority.
 * If SOURCE == SPECIFICWQ,
 *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
 *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
 *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
 *     same value.
 */
#define QM_SDQCR_SOURCE_CHANNELS 0x0
#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
#define QM_SDQCR_COUNT_EXACT1 0x0
#define QM_SDQCR_COUNT_UPTO3 0x20000000
#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
#define QM_SDQCR_TYPE_MASK 0x03000000
#define QM_SDQCR_TYPE_NULL 0x0
#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
#define QM_SDQCR_TYPE_ACTIVE 0x03000000
#define QM_SDQCR_TOKEN_MASK 0x00ff0000
#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)

/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
#define QM_VDQCR_FQID_MASK 0x00ffffff
#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
257*4882a593Smuzhiyun
/*
 * Used by all portal interrupt registers except 'inhibit'
 * Channels with frame availability
 */
#define QM_PIRQ_DQAVAIL 0x0000ffff

/* The DQAVAIL interrupt fields break down into these bits; */
#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
#define QM_DQAVAIL_MASK 0xffff
/* This mask contains all the "irqsource" bits visible to API users */
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)

/* One affine portal per possible cpu; qman_dma_portal serves DMA mappings */
extern struct qman_portal *affine_portals[NR_CPUS];
extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal);

unsigned int qm_get_fqid_maxcnt(void);

int qman_shutdown_fq(u32 fqid);

/* Kexec/soft-reset cleanup hooks — see their definitions for exact semantics */
int qman_requires_cleanup(void);
void qman_done_cleanup(void);
void qman_enable_irqs(void);
283