1*4882a593Smuzhiyun /* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2*4882a593Smuzhiyun *
3*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or without
4*4882a593Smuzhiyun * modification, are permitted provided that the following conditions are met:
5*4882a593Smuzhiyun * * Redistributions of source code must retain the above copyright
6*4882a593Smuzhiyun * notice, this list of conditions and the following disclaimer.
7*4882a593Smuzhiyun * * Redistributions in binary form must reproduce the above copyright
8*4882a593Smuzhiyun * notice, this list of conditions and the following disclaimer in the
9*4882a593Smuzhiyun * documentation and/or other materials provided with the distribution.
10*4882a593Smuzhiyun * * Neither the name of Freescale Semiconductor nor the
11*4882a593Smuzhiyun * names of its contributors may be used to endorse or promote products
12*4882a593Smuzhiyun * derived from this software without specific prior written permission.
13*4882a593Smuzhiyun *
14*4882a593Smuzhiyun * ALTERNATIVELY, this software may be distributed under the terms of the
15*4882a593Smuzhiyun * GNU General Public License ("GPL") as published by the Free Software
16*4882a593Smuzhiyun * Foundation, either version 2 of that License or (at your option) any
17*4882a593Smuzhiyun * later version.
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20*4882a593Smuzhiyun * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21*4882a593Smuzhiyun * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22*4882a593Smuzhiyun * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23*4882a593Smuzhiyun * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24*4882a593Smuzhiyun * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25*4882a593Smuzhiyun * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26*4882a593Smuzhiyun * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27*4882a593Smuzhiyun * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28*4882a593Smuzhiyun * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29*4882a593Smuzhiyun */
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #include "bman_priv.h"
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun #define IRQNAME "BMan portal %d"
34*4882a593Smuzhiyun #define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun /* Portal register assists */
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
39*4882a593Smuzhiyun /* Cache-inhibited register offsets */
40*4882a593Smuzhiyun #define BM_REG_RCR_PI_CINH 0x3000
41*4882a593Smuzhiyun #define BM_REG_RCR_CI_CINH 0x3100
42*4882a593Smuzhiyun #define BM_REG_RCR_ITR 0x3200
43*4882a593Smuzhiyun #define BM_REG_CFG 0x3300
44*4882a593Smuzhiyun #define BM_REG_SCN(n) (0x3400 + ((n) << 6))
45*4882a593Smuzhiyun #define BM_REG_ISR 0x3e00
46*4882a593Smuzhiyun #define BM_REG_IER 0x3e40
47*4882a593Smuzhiyun #define BM_REG_ISDR 0x3e80
48*4882a593Smuzhiyun #define BM_REG_IIR 0x3ec0
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun /* Cache-enabled register offsets */
51*4882a593Smuzhiyun #define BM_CL_CR 0x0000
52*4882a593Smuzhiyun #define BM_CL_RR0 0x0100
53*4882a593Smuzhiyun #define BM_CL_RR1 0x0140
54*4882a593Smuzhiyun #define BM_CL_RCR 0x1000
55*4882a593Smuzhiyun #define BM_CL_RCR_PI_CENA 0x3000
56*4882a593Smuzhiyun #define BM_CL_RCR_CI_CENA 0x3100
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun #else
59*4882a593Smuzhiyun /* Cache-inhibited register offsets */
60*4882a593Smuzhiyun #define BM_REG_RCR_PI_CINH 0x0000
61*4882a593Smuzhiyun #define BM_REG_RCR_CI_CINH 0x0004
62*4882a593Smuzhiyun #define BM_REG_RCR_ITR 0x0008
63*4882a593Smuzhiyun #define BM_REG_CFG 0x0100
64*4882a593Smuzhiyun #define BM_REG_SCN(n) (0x0200 + ((n) << 2))
65*4882a593Smuzhiyun #define BM_REG_ISR 0x0e00
66*4882a593Smuzhiyun #define BM_REG_IER 0x0e04
67*4882a593Smuzhiyun #define BM_REG_ISDR 0x0e08
68*4882a593Smuzhiyun #define BM_REG_IIR 0x0e0c
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun /* Cache-enabled register offsets */
71*4882a593Smuzhiyun #define BM_CL_CR 0x0000
72*4882a593Smuzhiyun #define BM_CL_RR0 0x0100
73*4882a593Smuzhiyun #define BM_CL_RR1 0x0140
74*4882a593Smuzhiyun #define BM_CL_RCR 0x1000
75*4882a593Smuzhiyun #define BM_CL_RCR_PI_CENA 0x3000
76*4882a593Smuzhiyun #define BM_CL_RCR_CI_CENA 0x3100
77*4882a593Smuzhiyun #endif
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun /*
80*4882a593Smuzhiyun * Portal modes.
81*4882a593Smuzhiyun * Enum types;
82*4882a593Smuzhiyun * pmode == production mode
83*4882a593Smuzhiyun * cmode == consumption mode,
84*4882a593Smuzhiyun * Enum values use 3 letter codes. First letter matches the portal mode,
85*4882a593Smuzhiyun * remaining two letters indicate;
86*4882a593Smuzhiyun * ci == cache-inhibited portal register
87*4882a593Smuzhiyun * ce == cache-enabled portal register
88*4882a593Smuzhiyun * vb == in-band valid-bit (cache-enabled)
89*4882a593Smuzhiyun */
/* How software tells hardware a new RCR entry is ready (BCSP_CFG::RPM) */
enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
	bm_rcr_pce = 1,		/* PI index, cache-enabled */
	bm_rcr_pvb = 2		/* valid-bit */
};
/* How software learns which RCR entries hardware has consumed */
enum bm_rcr_cmode {		/* s/w-only */
	bm_rcr_cci,		/* CI index, cache-inhibited */
	bm_rcr_cce		/* CI index, cache-enabled */
};
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun /* --- Portal structures --- */
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun #define BM_RCR_SIZE 8
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun /* Release Command */
/*
 * One 64-byte entry of the Release Command Ring. The verb/bpid header
 * overlays the same storage as the array of up to 8 buffer descriptors
 * used by multi-buffer release commands.
 */
struct bm_rcr_entry {
	union {
		struct {
			u8 _ncw_verb; /* writes to this are non-coherent */
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
};
116*4882a593Smuzhiyun #define BM_RCR_VERB_VBIT 0x80
117*4882a593Smuzhiyun #define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
118*4882a593Smuzhiyun #define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
119*4882a593Smuzhiyun #define BM_RCR_VERB_CMD_BPID_MULTI 0x30
120*4882a593Smuzhiyun #define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
121*4882a593Smuzhiyun
/* Software-side state of one portal's Release Command Ring */
struct bm_rcr {
	struct bm_rcr_entry *ring, *cursor;	/* ring base; next entry to fill */
	u8 ci, available, ithresh, vbit;	/* consumer index, free entries,
						 * interrupt threshold, and the
						 * valid-bit to stamp on commit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;		/* set between bm_rcr_start() and commit */
	enum bm_rcr_pmode pmode;
	enum bm_rcr_cmode cmode;
#endif
};
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun /* MC (Management Command) command */
/* MC (Management Command) command — 64-byte command register layout */
struct bm_mc_command {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 bpid; /* used by acquire command */
	u8 __reserved[62];
};
138*4882a593Smuzhiyun #define BM_MCC_VERB_VBIT 0x80
139*4882a593Smuzhiyun #define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
140*4882a593Smuzhiyun #define BM_MCC_VERB_CMD_ACQUIRE 0x10
141*4882a593Smuzhiyun #define BM_MCC_VERB_CMD_QUERY 0x40
142*4882a593Smuzhiyun #define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun /* MC result, Acquire and Query Response */
/*
 * MC result, Acquire and Query Response. The verb/bpid header overlays
 * the same 64 bytes as the buffers returned by an acquire command.
 */
union bm_mc_result {
	struct {
		u8 verb;
		u8 bpid;
		u8 __reserved[62];
	};
	struct bm_buffer bufs[8];
};
153*4882a593Smuzhiyun #define BM_MCR_VERB_VBIT 0x80
154*4882a593Smuzhiyun #define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
155*4882a593Smuzhiyun #define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
156*4882a593Smuzhiyun #define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
157*4882a593Smuzhiyun #define BM_MCR_VERB_CMD_ERR_INVALID 0x60
158*4882a593Smuzhiyun #define BM_MCR_VERB_CMD_ERR_ECC 0x70
159*4882a593Smuzhiyun #define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
160*4882a593Smuzhiyun #define BM_MCR_TIMEOUT 10000 /* us */
161*4882a593Smuzhiyun
/* Software-side state of one portal's management-command interface */
struct bm_mc {
	struct bm_mc_command *cr;	/* command register (cache-enabled) */
	union bm_mc_result *rr;		/* pair of response registers */
	u8 rridx, vbit;			/* active response index; verb valid-bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can only be _mc_start()ed */
		mc_idle,
		/* Can only be _mc_commit()ed or _mc_abort()ed */
		mc_user,
		/* Can only be _mc_retry()ed */
		mc_hw
	} state;
#endif
};
177*4882a593Smuzhiyun
/* Mapped base addresses of one portal's two register regions */
struct bm_addr {
	void *ce; /* cache-enabled */
	__be32 *ce_be; /* Same as above but for direct access */
	void __iomem *ci; /* cache-inhibited */
};
183*4882a593Smuzhiyun
/* Low-level portal: addresses plus the RCR and MC sub-interfaces */
struct bm_portal {
	struct bm_addr addr;
	struct bm_rcr rcr;
	struct bm_mc mc;
} ____cacheline_aligned;
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun /* Cache-inhibited register access. */
/* Read a 32-bit big-endian value from a cache-inhibited portal register. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
	void __iomem *reg = p->addr.ci + offset;

	return ioread32be(reg);
}
195*4882a593Smuzhiyun
/* Write a 32-bit big-endian value to a cache-inhibited portal register. */
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
	void __iomem *reg = p->addr.ci + offset;

	iowrite32be(val, reg);
}
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun /* Cache Enabled Portal Access */
/* Invalidate the cacheline backing a cache-enabled portal register. */
static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
{
	void *cena = p->addr.ce + offset;

	dpaa_invalidate(cena);
}
206*4882a593Smuzhiyun
/* Prefetch (read-only) the cacheline of a cache-enabled portal register. */
static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
{
	void *cena = p->addr.ce + offset;

	dpaa_touch_ro(cena);
}
211*4882a593Smuzhiyun
/* Read a 32-bit word from the cache-enabled region, converting from BE. */
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
	const __be32 *word = p->addr.ce_be + offset / 4;

	return be32_to_cpu(*word);
}
216*4882a593Smuzhiyun
/* High-level, per-CPU portal object wrapping the low-level bm_portal */
struct bman_portal {
	struct bm_portal p;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* probing time config params for cpu-affine portals */
	const struct bm_portal_config *config;
	char irqname[MAX_IRQNAME];	/* formatted from IRQNAME and cpu */
};
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun static cpumask_t affine_mask;
227*4882a593Smuzhiyun static DEFINE_SPINLOCK(affine_mask_lock);
228*4882a593Smuzhiyun static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
229*4882a593Smuzhiyun
/*
 * Return the calling CPU's affine portal; get_cpu_var() disables
 * preemption, so this must be balanced by put_affine_portal().
 */
static inline struct bman_portal *get_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}
234*4882a593Smuzhiyun
/* Release the affine portal taken by get_affine_portal() (re-enables
 * preemption via put_cpu_var()). */
static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun /*
241*4882a593Smuzhiyun * This object type refers to a pool, it isn't *the* pool. There may be
242*4882a593Smuzhiyun * more than one such object per BMan buffer pool, eg. if different users of the
243*4882a593Smuzhiyun * pool are operating via different portals.
244*4882a593Smuzhiyun */
struct bman_pool {
	/* index of the buffer pool to encapsulate (0-63) */
	u32 bpid;
	/* Used for hash-table admin when using depletion notifications. */
	struct bman_portal *portal;
	struct bman_pool *next;	/* link in the portal's pool list */
};
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun static u32 poll_portal_slow(struct bman_portal *p, u32 is);
254*4882a593Smuzhiyun
portal_isr(int irq,void * ptr)255*4882a593Smuzhiyun static irqreturn_t portal_isr(int irq, void *ptr)
256*4882a593Smuzhiyun {
257*4882a593Smuzhiyun struct bman_portal *p = ptr;
258*4882a593Smuzhiyun struct bm_portal *portal = &p->p;
259*4882a593Smuzhiyun u32 clear = p->irq_sources;
260*4882a593Smuzhiyun u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun if (unlikely(!is))
263*4882a593Smuzhiyun return IRQ_NONE;
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun clear |= poll_portal_slow(p, is);
266*4882a593Smuzhiyun bm_out(portal, BM_REG_ISR, clear);
267*4882a593Smuzhiyun return IRQ_HANDLED;
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun /* --- RCR API --- */
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun #define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
273*4882a593Smuzhiyun #define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
rcr_carryclear(struct bm_rcr_entry * p)276*4882a593Smuzhiyun static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
277*4882a593Smuzhiyun {
278*4882a593Smuzhiyun uintptr_t addr = (uintptr_t)p;
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun addr &= ~RCR_CARRY;
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun return (struct bm_rcr_entry *)addr;
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun #ifdef CONFIG_FSL_DPAA_CHECKING
286*4882a593Smuzhiyun /* Bit-wise logic to convert a ring pointer to a ring index */
rcr_ptr2idx(struct bm_rcr_entry * e)287*4882a593Smuzhiyun static int rcr_ptr2idx(struct bm_rcr_entry *e)
288*4882a593Smuzhiyun {
289*4882a593Smuzhiyun return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun #endif
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun /* Increment the 'cursor' ring pointer, taking 'vbit' into account */
rcr_inc(struct bm_rcr * rcr)294*4882a593Smuzhiyun static inline void rcr_inc(struct bm_rcr *rcr)
295*4882a593Smuzhiyun {
296*4882a593Smuzhiyun /* increment to the next RCR pointer and handle overflow and 'vbit' */
297*4882a593Smuzhiyun struct bm_rcr_entry *partial = rcr->cursor + 1;
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun rcr->cursor = rcr_carryclear(partial);
300*4882a593Smuzhiyun if (partial != rcr->cursor)
301*4882a593Smuzhiyun rcr->vbit ^= BM_RCR_VERB_VBIT;
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun
bm_rcr_get_avail(struct bm_portal * portal)304*4882a593Smuzhiyun static int bm_rcr_get_avail(struct bm_portal *portal)
305*4882a593Smuzhiyun {
306*4882a593Smuzhiyun struct bm_rcr *rcr = &portal->rcr;
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun return rcr->available;
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun
bm_rcr_get_fill(struct bm_portal * portal)311*4882a593Smuzhiyun static int bm_rcr_get_fill(struct bm_portal *portal)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun struct bm_rcr *rcr = &portal->rcr;
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun return BM_RCR_SIZE - 1 - rcr->available;
316*4882a593Smuzhiyun }
317*4882a593Smuzhiyun
/* Program the RCR interrupt threshold, mirroring it in software state. */
static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
	portal->rcr.ithresh = ithresh;
	bm_out(portal, BM_REG_RCR_ITR, ithresh);
}
325*4882a593Smuzhiyun
/*
 * Prefetch the cache-enabled shadow of the RCR consumer index so a
 * subsequent bm_rcr_cce_update() read is warm. Only valid in bm_rcr_cce
 * consumption mode.
 */
static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	__maybe_unused struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}
333*4882a593Smuzhiyun
/*
 * Re-read the hardware consumer index from its cache-enabled shadow and
 * credit newly consumed entries back to rcr->available. Returns how
 * many entries were consumed since the last update.
 */
static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
	/* Discard the stale cacheline so the next read sees fresh hw state */
	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}
346*4882a593Smuzhiyun
/*
 * Begin a release command: returns the zeroed cursor entry for the
 * caller to fill in, or NULL if the ring is full. Must be paired with
 * bm_rcr_pvb_commit(); only one entry may be "started" at a time.
 */
static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(!rcr->busy);
	if (!rcr->available)
		return NULL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 1;
#endif
	dpaa_zero(rcr->cursor);
	return rcr->cursor;
}
360*4882a593Smuzhiyun
/*
 * Publish the entry obtained from bm_rcr_start() using in-band
 * valid-bit production: the verb (carrying the current valid bit) is
 * written last, after a DMA write barrier, so hardware never observes a
 * partially written command.
 */
static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
	DPAA_ASSERT(rcr->available >= 1);
	dma_wmb();	/* order the entry body before the verb write */
	rcursor = rcr->cursor;
	rcursor->_ncw_verb = myverb | rcr->vbit;
	dpaa_flush(rcursor);	/* push the completed entry out to hardware */
	rcr_inc(rcr);
	rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
#endif
}
379*4882a593Smuzhiyun
/*
 * Initialise the RCR software state from the portal's current hardware
 * producer/consumer indices and program the production mode into
 * BCSP_CFG::RPM. Always returns 0.
 */
static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
		       enum bm_rcr_cmode cmode)
{
	struct bm_rcr *rcr = &portal->rcr;
	u32 cfg;
	u8 pi;

	rcr->ring = portal->addr.ce + BM_CL_RCR;
	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	rcr->cursor = rcr->ring + pi;
	/* bit BM_RCR_SIZE of the PI register carries the current valid-bit */
	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
		BM_RCR_VERB_VBIT : 0;
	rcr->available = BM_RCR_SIZE - 1
		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
	rcr->pmode = pmode;
	rcr->cmode = cmode;
#endif
	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
		| (pmode & 0x3); /* BCSP_CFG::RPM */
	bm_out(portal, BM_REG_CFG, cfg);
	return 0;
}
406*4882a593Smuzhiyun
/*
 * RCR teardown hook. Under CONFIG_FSL_DPAA_CHECKING, compare the
 * hardware producer/consumer indices against software state and warn if
 * entries were uncommitted or unconsumed; otherwise this is a no-op.
 */
static void bm_rcr_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_rcr *rcr = &portal->rcr;
	int i;

	DPAA_ASSERT(!rcr->busy);

	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr_ptr2idx(rcr->cursor))
		pr_crit("losing uncommitted RCR entries\n");

	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr->ci)
		pr_crit("missing existing RCR completions\n");
	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
		pr_crit("RCR destroyed unquiesced\n");
#endif
}
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun /* --- Management command API --- */
bm_mc_init(struct bm_portal * portal)428*4882a593Smuzhiyun static int bm_mc_init(struct bm_portal *portal)
429*4882a593Smuzhiyun {
430*4882a593Smuzhiyun struct bm_mc *mc = &portal->mc;
431*4882a593Smuzhiyun
432*4882a593Smuzhiyun mc->cr = portal->addr.ce + BM_CL_CR;
433*4882a593Smuzhiyun mc->rr = portal->addr.ce + BM_CL_RR0;
434*4882a593Smuzhiyun mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
435*4882a593Smuzhiyun 0 : 1;
436*4882a593Smuzhiyun mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
437*4882a593Smuzhiyun #ifdef CONFIG_FSL_DPAA_CHECKING
438*4882a593Smuzhiyun mc->state = mc_idle;
439*4882a593Smuzhiyun #endif
440*4882a593Smuzhiyun return 0;
441*4882a593Smuzhiyun }
442*4882a593Smuzhiyun
/*
 * MC teardown hook: under CONFIG_FSL_DPAA_CHECKING, verify no command
 * was left in flight; otherwise a no-op.
 */
static void bm_mc_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
	if (mc->state != mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}
453*4882a593Smuzhiyun
/*
 * Begin a management command: zero the command register and return it
 * for the caller to fill in. Must be followed by bm_mc_commit().
 */
static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}
465*4882a593Smuzhiyun
/*
 * Submit the command prepared by bm_mc_start(): write the verb (with
 * the current valid bit) after a DMA barrier so the body is visible
 * first, flush it to hardware, and pre-touch the response register for
 * the upcoming bm_mc_result() poll.
 */
static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_user);
	dma_wmb();	/* command body must be visible before the verb */
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_hw;
#endif
}
480*4882a593Smuzhiyun
/*
 * Poll for the response to the last committed management command.
 * Returns NULL while the command is still in flight; on completion,
 * flips the response index and valid-bit for the next command and
 * returns the result register.
 */
static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!rr->verb) {
		/* not done yet: refresh the cacheline and keep polling */
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return rr;
}
503*4882a593Smuzhiyun
bm_mc_result_timeout(struct bm_portal * portal,union bm_mc_result ** mcr)504*4882a593Smuzhiyun static inline int bm_mc_result_timeout(struct bm_portal *portal,
505*4882a593Smuzhiyun union bm_mc_result **mcr)
506*4882a593Smuzhiyun {
507*4882a593Smuzhiyun int timeout = BM_MCR_TIMEOUT;
508*4882a593Smuzhiyun
509*4882a593Smuzhiyun do {
510*4882a593Smuzhiyun *mcr = bm_mc_result(portal);
511*4882a593Smuzhiyun if (*mcr)
512*4882a593Smuzhiyun break;
513*4882a593Smuzhiyun udelay(1);
514*4882a593Smuzhiyun } while (--timeout);
515*4882a593Smuzhiyun
516*4882a593Smuzhiyun return timeout;
517*4882a593Smuzhiyun }
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun /* Disable all BSCN interrupts for the portal */
/* Disable all BSCN interrupts for the portal */
static void bm_isr_bscn_disable(struct bm_portal *portal)
{
	int i;

	/* Clear both state-change notification registers (BPIDs 0-63) */
	for (i = 0; i < 2; i++)
		bm_out(portal, BM_REG_SCN(i), 0);
}
525*4882a593Smuzhiyun
/*
 * Bring up one portal from its probed config @c: wire up the mapped
 * register addresses, initialise the RCR and MC sub-APIs, mask/clear
 * all interrupt sources, request the portal IRQ and bind it to c->cpu,
 * then verify the RCR is empty before enabling interrupt delivery.
 * Returns 0 on success or -EIO on any failure (partial setup is undone
 * via the goto chain below).
 */
static int bman_create_portal(struct bman_portal *portal,
			      const struct bm_portal_config *c)
{
	struct bm_portal *p;
	int ret;

	p = &portal->p;
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference...
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
		dev_err(c->dev, "RCR initialisation failed\n");
		goto fail_rcr;
	}
	if (bm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/*
	 * Default to all BPIDs disabled, we enable as required at
	 * run-time.
	 */
	bm_isr_bscn_disable(p);

	/* Write-to-clear any stale interrupt status bits */
	bm_out(p, BM_REG_ISDR, 0xffffffff);
	portal->irq_sources = 0;
	bm_out(p, BM_REG_IER, 0);
	bm_out(p, BM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}

	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need RCR to be empty before continuing */
	ret = bm_rcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "RCR unclean\n");
		goto fail_rcr_empty;
	}
	/* Success */
	portal->config = c;

	/* Clear ISDR/IIR — presumably un-disables and un-inhibits interrupt
	 * delivery for this portal; TODO confirm against the BMan block guide */
	bm_out(p, BM_REG_ISDR, 0);
	bm_out(p, BM_REG_IIR, 0);

	return 0;

fail_rcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	bm_mc_finish(p);
fail_mc:
	bm_rcr_finish(p);
fail_rcr:
	return -EIO;
}
593*4882a593Smuzhiyun
bman_create_affine_portal(const struct bm_portal_config * c)594*4882a593Smuzhiyun struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
595*4882a593Smuzhiyun {
596*4882a593Smuzhiyun struct bman_portal *portal;
597*4882a593Smuzhiyun int err;
598*4882a593Smuzhiyun
599*4882a593Smuzhiyun portal = &per_cpu(bman_affine_portal, c->cpu);
600*4882a593Smuzhiyun err = bman_create_portal(portal, c);
601*4882a593Smuzhiyun if (err)
602*4882a593Smuzhiyun return NULL;
603*4882a593Smuzhiyun
604*4882a593Smuzhiyun spin_lock(&affine_mask_lock);
605*4882a593Smuzhiyun cpumask_set_cpu(c->cpu, &affine_mask);
606*4882a593Smuzhiyun spin_unlock(&affine_mask_lock);
607*4882a593Smuzhiyun
608*4882a593Smuzhiyun return portal;
609*4882a593Smuzhiyun }
610*4882a593Smuzhiyun
/*
 * Slow-path servicing of portal interrupt sources. Currently only the
 * RCR-threshold source is defined: credit consumed ring entries, drop
 * the threshold to 0 and write-to-clear the status bit. Returns the set
 * of sources that were serviced (all of @is).
 */
static u32 poll_portal_slow(struct bman_portal *p, u32 is)
{
	u32 ret = is;

	if (is & BM_PIRQ_RCRI) {
		bm_rcr_cce_update(&p->p);
		bm_rcr_set_ithresh(&p->p, 0);
		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
		is &= ~BM_PIRQ_RCRI;
	}

	/* There should be no status register bits left undefined */
	DPAA_ASSERT(!is);
	return ret;
}
626*4882a593Smuzhiyun
/*
 * Enable additional interrupt sources (masked to BM_PIRQ_VISIBLE) on
 * portal @p. Runs with local interrupts disabled so the read-modify-
 * write of irq_sources doesn't race the portal ISR. Always returns 0.
 */
int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
{
	unsigned long flags;

	local_irq_save(flags);
	p->irq_sources |= bits & BM_PIRQ_VISIBLE;
	bm_out(&p->p, BM_REG_IER, p->irq_sources);
	local_irq_restore(flags);

	return 0;
}
637*4882a593Smuzhiyun
/*
 * Drain buffer pool @bpid by issuing single-buffer acquire commands on
 * this CPU's affine portal until the pool reports empty (zero buffer
 * count in the response verb). Returns 0 on success or -ETIMEDOUT if
 * an acquire command gets no response within BM_MCR_TIMEOUT.
 */
int bm_shutdown_pool(u32 bpid)
{
	int err = 0;
	struct bm_mc_command *bm_cmd;
	union bm_mc_result *bm_res;


	struct bman_portal *p = get_affine_portal();
	while (1) {
		/* Acquire buffers until empty */
		bm_cmd = bm_mc_start(&p->p);
		bm_cmd->bpid = bpid;
		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
			pr_crit("BMan Acquire Command timedout\n");
			err = -ETIMEDOUT;
			goto done;
		}
		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
			/* Pool is empty */
			goto done;
		}
	}
done:
	put_affine_portal();
	return err;
}
665*4882a593Smuzhiyun
666*4882a593Smuzhiyun struct gen_pool *bm_bpalloc;
667*4882a593Smuzhiyun
/*
 * Allocate @count contiguous BPIDs from the genalloc pool.
 * On success, stores the first BPID (with the genalloc offset stripped)
 * into *result and returns 0; returns -ENOMEM when the pool is exhausted.
 */
static int bm_alloc_bpid_range(u32 *result, u32 count)
{
	unsigned long id = gen_pool_alloc(bm_bpalloc, count);

	/* genalloc returns 0 on failure; valid IDs carry DPAA_GENALLOC_OFF */
	if (!id)
		return -ENOMEM;

	*result = id & ~DPAA_GENALLOC_OFF;
	return 0;
}
680*4882a593Smuzhiyun
/*
 * Return a BPID to the allocator. The pool is drained first; if the
 * drain fails the BPID is deliberately leaked (not returned to the
 * allocator) so a partially-full pool can never be handed out again.
 */
static int bm_release_bpid(u32 bpid)
{
	int err = bm_shutdown_pool(bpid);

	if (err) {
		pr_debug("BPID %d leaked\n", bpid);
		return err;
	}

	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
694*4882a593Smuzhiyun
bman_new_pool(void)695*4882a593Smuzhiyun struct bman_pool *bman_new_pool(void)
696*4882a593Smuzhiyun {
697*4882a593Smuzhiyun struct bman_pool *pool = NULL;
698*4882a593Smuzhiyun u32 bpid;
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun if (bm_alloc_bpid_range(&bpid, 1))
701*4882a593Smuzhiyun return NULL;
702*4882a593Smuzhiyun
703*4882a593Smuzhiyun pool = kmalloc(sizeof(*pool), GFP_KERNEL);
704*4882a593Smuzhiyun if (!pool)
705*4882a593Smuzhiyun goto err;
706*4882a593Smuzhiyun
707*4882a593Smuzhiyun pool->bpid = bpid;
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun return pool;
710*4882a593Smuzhiyun err:
711*4882a593Smuzhiyun bm_release_bpid(bpid);
712*4882a593Smuzhiyun kfree(pool);
713*4882a593Smuzhiyun return NULL;
714*4882a593Smuzhiyun }
715*4882a593Smuzhiyun EXPORT_SYMBOL(bman_new_pool);
716*4882a593Smuzhiyun
/*
 * Destroy a buffer pool: drain and release its BPID, then free the
 * pool object. Any BPID-release failure is logged (and the BPID leaked)
 * inside bm_release_bpid; the object is freed regardless.
 */
void bman_free_pool(struct bman_pool *pool)
{
bm_release_bpid(pool->bpid);

kfree(pool);
}
EXPORT_SYMBOL(bman_free_pool);
724*4882a593Smuzhiyun
/* Accessor: the hardware buffer-pool ID backing this pool object. */
int bman_get_bpid(const struct bman_pool *pool)
{
return pool->bpid;
}
EXPORT_SYMBOL(bman_get_bpid);
730*4882a593Smuzhiyun
update_rcr_ci(struct bman_portal * p,int avail)731*4882a593Smuzhiyun static void update_rcr_ci(struct bman_portal *p, int avail)
732*4882a593Smuzhiyun {
733*4882a593Smuzhiyun if (avail)
734*4882a593Smuzhiyun bm_rcr_cce_prefetch(&p->p);
735*4882a593Smuzhiyun else
736*4882a593Smuzhiyun bm_rcr_cce_update(&p->p);
737*4882a593Smuzhiyun }
738*4882a593Smuzhiyun
/*
 * Release @num buffers (1..8) from @bufs back into @pool via the portal's
 * release command ring (RCR).
 *
 * The function first spins (up to ~1ms, re-taking the affine portal each
 * iteration) waiting for a free RCR entry, then fills the entry and commits
 * it. Returns 0 on success or -ETIMEDOUT if no RCR entry became available.
 *
 * NOTE(review): the portal is dropped and re-acquired between obtaining @r
 * and filling it — presumably safe because the RCR slot, once started, is
 * owned by this caller until committed; confirm against the portal API.
 */
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
struct bman_portal *p;
struct bm_rcr_entry *r;
unsigned long irqflags;
int avail, timeout = 1000; /* 1ms */
int i = num - 1;

DPAA_ASSERT(num > 0 && num <= 8);

do {
p = get_affine_portal();
local_irq_save(irqflags);
avail = bm_rcr_get_avail(&p->p);
/* Running low on entries: refresh the consumer index first */
if (avail < 2)
update_rcr_ci(p, avail);
r = bm_rcr_start(&p->p);
local_irq_restore(irqflags);
put_affine_portal();
if (likely(r))
break;

udelay(1);
} while (--timeout);

if (unlikely(!timeout))
return -ETIMEDOUT;

p = get_affine_portal();
local_irq_save(irqflags);
/*
 * we can copy all but the first entry, as this can trigger badness
 * with the valid-bit
 */
/* First buffer is written field-by-field (entry 0 carries the valid bit) */
bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
bm_buffer_set_bpid(r->bufs, pool->bpid);
/* Remaining num-1 buffers can be bulk-copied */
if (i)
memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
(num & BM_RCR_VERB_BUFCOUNT_MASK));

local_irq_restore(irqflags);
put_affine_portal();
return 0;
}
EXPORT_SYMBOL(bman_release);
786*4882a593Smuzhiyun
/*
 * Acquire up to @num buffers (1..8) from @pool via a management command.
 *
 * On success returns the number acquired (== @num) and, if @bufs is
 * non-NULL, copies the acquired buffer descriptors into it. Returns
 * -ETIMEDOUT if the management command times out, or -ENOMEM if the pool
 * yielded fewer than @num buffers.
 *
 * NOTE(review): when fewer than @num buffers are returned, @bufs is still
 * memcpy'd for @num entries — trailing entries come from the result ring
 * and are presumably undefined; callers should trust only the first `ret`.
 */
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
struct bman_portal *p = get_affine_portal();
struct bm_mc_command *mcc;
union bm_mc_result *mcr;
int ret;

DPAA_ASSERT(num > 0 && num <= 8);

mcc = bm_mc_start(&p->p);
mcc->bpid = pool->bpid;
bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
(num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
if (!bm_mc_result_timeout(&p->p, &mcr)) {
put_affine_portal();
pr_crit("BMan Acquire Timeout\n");
return -ETIMEDOUT;
}
/* Hardware reports how many buffers it actually handed back */
ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
if (bufs)
memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));

put_affine_portal();
if (ret != num)
ret = -ENOMEM;
return ret;
}
EXPORT_SYMBOL(bman_acquire);
815*4882a593Smuzhiyun
816*4882a593Smuzhiyun const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal * portal)817*4882a593Smuzhiyun bman_get_bm_portal_config(const struct bman_portal *portal)
818*4882a593Smuzhiyun {
819*4882a593Smuzhiyun return portal->config;
820*4882a593Smuzhiyun }
821