/*
 * Driver for Marvell NETA network controller Buffer Manager.
 *
 * Copyright (C) 2015 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"

#define MVNETA_BM_DRIVER_NAME		"mvneta_bm"
#define MVNETA_BM_DRIVER_VERSION	"1.0"

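/* Helpers for accessing the BM controller's memory-mapped registers */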
static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
{
	writel(data, priv->reg_base + offset);
}

static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
{
	return readl(priv->reg_base + offset);
}

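/* Enable a pool in hardware. The BM interrupt cause register is cleared
 * afterwards so that no stale events are left pending for this pool.
 */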
static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val |= MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
}

static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val &= ~MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
}

static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val |= mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val &= ~mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

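/* Route a pool's buffer pointer accesses to the proper DRAM target via
 * the MBUS crossbar, using the target ID and attributes obtained from
 * mvebu_mbus_get_dram_win_info().
 */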
static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
				      u8 target_id, u8 attr)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
	val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
	val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
	val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
	val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);

	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
}

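/* Buffer constructor, called by the generic hardware buffer manager
 * (net/hwbm) for each buffer added to a pool: it stores the buffer's
 * virtual address at the start of the buffer, maps it for DMA and hands
 * the resulting physical address to the hardware pool.
 */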
int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
{
	struct mvneta_bm_pool *bm_pool =
		(struct mvneta_bm_pool *)hwbm_pool->priv;
	struct mvneta_bm *priv = bm_pool->priv;
	dma_addr_t phys_addr;

	/* In order to update buf_cookie field of RX descriptor properly,
	 * BM hardware expects buf virtual address to be placed in the
	 * first four bytes of mapped buffer.
	 */
	*(u32 *)buf = (u32)buf;
	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
		return -ENOMEM;

	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
	return 0;
}
EXPORT_SYMBOL_GPL(mvneta_bm_construct);

/* Create pool */
static int mvneta_bm_pool_create(struct mvneta_bm *priv,
				 struct mvneta_bm_pool *bm_pool)
{
	struct platform_device *pdev = priv->pdev;
	u8 target_id, attr;
	int size_bytes, err;

	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
					   &attr);
	if (err < 0) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		return err;
	}

	/* Set pool address */
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
			bm_pool->phys_addr);

	mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
	mvneta_bm_pool_enable(priv, bm_pool->id);

	return 0;
}

/* Notify the driver that BM pool is being used as a specific type and
 * return the pool pointer on success
 */
struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
					  enum mvneta_bm_type type, u8 port_id,
					  int pkt_size)
{
	struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
	int num, err;

	if (new_pool->type == MVNETA_BM_LONG &&
	    new_pool->port_map != 1 << port_id) {
		dev_err(&priv->pdev->dev,
			"long pool cannot be shared by the ports\n");
		return NULL;
	}

	if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
		dev_err(&priv->pdev->dev,
			"mixing pools' types between the ports is forbidden\n");
		return NULL;
	}

	if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
		new_pool->pkt_size = pkt_size;

	/* Allocate buffers in case BM pool hasn't been used yet */
	if (new_pool->type == MVNETA_BM_FREE) {
		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;

		new_pool->priv = priv;
		new_pool->type = type;
		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
		hwbm_pool->frag_size =
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		hwbm_pool->construct = mvneta_bm_construct;
		hwbm_pool->priv = new_pool;
		mutex_init(&hwbm_pool->buf_lock);

		/* Create new pool */
		err = mvneta_bm_pool_create(priv, new_pool);
		if (err) {
			dev_err(&priv->pdev->dev, "fail to create pool %d\n",
				new_pool->id);
			return NULL;
		}

		/* Allocate buffers for this pool */
		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
		if (num != hwbm_pool->size) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, hwbm_pool->size);
			return NULL;
		}
	}

	return new_pool;
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);
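
/* A minimal usage sketch (not compiled here): a port driver such as
 * mvneta would typically claim a pool during its own setup roughly as
 * follows, where "priv", "pool_id", "port->id" and "pkt_size" are
 * assumed to come from the caller's context:
 *
 *	struct mvneta_bm_pool *long_pool;
 *
 *	long_pool = mvneta_bm_pool_use(priv, pool_id, MVNETA_BM_LONG,
 *				       port->id, pkt_size);
 *	if (!long_pool)
 *		return -ENOMEM;
 *	long_pool->port_map |= 1 << port->id;
 *
 * Note that the caller is expected to update port_map itself:
 * mvneta_bm_pool_use() only validates the sharing rules and populates a
 * pool that has not been used yet, while mvneta_bm_bufs_free() and
 * mvneta_bm_pool_destroy() below clear the caller's bits from port_map.
 */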

/* Free all buffers from the pool */
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
			 u8 port_map)
{
	int i;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 *vaddr;

		/* Get buffer physical address (indirect access) */
		buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);

		/* Work-around to the problems when destroying the pool,
		 * when it occurs that a read access to BPPI returns 0.
		 */
		if (buf_phys_addr == 0)
			continue;

		vaddr = phys_to_virt(buf_phys_addr);
		if (!vaddr)
			break;

		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
	}

	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->hwbm_pool.buf_num -= i;
}
EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);

/* Cleanup pool */
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
			    struct mvneta_bm_pool *bm_pool, u8 port_map)
{
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	bm_pool->type = MVNETA_BM_FREE;

	mvneta_bm_bufs_free(priv, bm_pool, port_map);
	if (hwbm_pool->buf_num)
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);

	if (bm_pool->virt_addr) {
		dma_free_coherent(&priv->pdev->dev,
				  sizeof(u32) * hwbm_pool->size,
				  bm_pool->virt_addr, bm_pool->phys_addr);
		bm_pool->virt_addr = NULL;
	}

	mvneta_bm_pool_disable(priv, bm_pool->id);
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);

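/* Initialize all hardware pools: start the BM unit, reset each pool's
 * ring pointers and program its capacity. Capacity and packet size may
 * be overridden per pool from the device tree, e.g. (a sketch, with the
 * property names taken from the parsing code below):
 *
 *	pool2,capacity = <4096>;
 *	pool2,pkt-size = <512>;
 */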
static void mvneta_bm_pools_init(struct mvneta_bm *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct mvneta_bm_pool *bm_pool;
	char prop[15];
	u32 size;
	int i;

	/* Activate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);

	/* Create all pools with maximum size */
	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		bm_pool->type = MVNETA_BM_FREE;

		/* Reset read pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);

		/* Reset write pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);

		/* Configure pool size according to DT or use default value */
		sprintf(prop, "pool%d,capacity", i);
		if (of_property_read_u32(dn, prop, &size)) {
			size = MVNETA_BM_POOL_CAP_DEF;
		} else if (size > MVNETA_BM_POOL_CAP_MAX) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MAX);
			size = MVNETA_BM_POOL_CAP_MAX;
		} else if (size < MVNETA_BM_POOL_CAP_MIN) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MIN);
			size = MVNETA_BM_POOL_CAP_MIN;
		} else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, round to %d\n",
				 i, size,
				 ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN));
			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
		}
		bm_pool->hwbm_pool.size = size;

		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
				bm_pool->hwbm_pool.size);

		/* Obtain custom pkt_size from DT */
		sprintf(prop, "pool%d,pkt-size", i);
		if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
			bm_pool->pkt_size = 0;
	}
}

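/* Apply the initial BM unit configuration: all interrupts masked, cause
 * register cleared and the maximum in-burst size reduced to 16 buffer
 * pointers.
 */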
static void mvneta_bm_default_set(struct mvneta_bm *priv)
{
	u32 val;

	/* Mask BM all interrupts */
	mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);

	/* Set BM configuration register */
	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);

	/* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
	val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
	val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static int mvneta_bm_init(struct mvneta_bm *priv)
{
	mvneta_bm_default_set(priv);

	/* Allocate and initialize BM pools structures */
	priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
				      sizeof(struct mvneta_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	mvneta_bm_pools_init(priv);

	return 0;
}

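/* Reserve the buffer pointers' internal (BPPI) memory from the SRAM
 * region referenced by the "internal-mem" phandle in the device tree.
 */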
static int mvneta_bm_get_sram(struct device_node *dn,
			      struct mvneta_bm *priv)
{
	priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
	if (!priv->bppi_pool)
		return -ENOMEM;

	priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
						  MVNETA_BM_BPPI_SIZE,
						  &priv->bppi_phys_addr);
	if (!priv->bppi_virt_addr)
		return -ENOMEM;

	return 0;
}

static void mvneta_bm_put_sram(struct mvneta_bm *priv)
{
	gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
		      MVNETA_BM_BPPI_SIZE);
}

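/* Look up the BM private data for a given device tree node. On success
 * this implicitly takes a reference on the underlying platform device
 * (via of_find_device_by_node()), which the caller must release with
 * mvneta_bm_put().
 */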
struct mvneta_bm *mvneta_bm_get(struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);

	return pdev ? platform_get_drvdata(pdev) : NULL;
}
EXPORT_SYMBOL_GPL(mvneta_bm_get);

void mvneta_bm_put(struct mvneta_bm *priv)
{
	platform_device_put(priv->pdev);
}
EXPORT_SYMBOL_GPL(mvneta_bm_put);

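/* Probe: map the registers, enable the clock, reserve the BPPI SRAM and
 * bring up the BM unit with all pools initialized but still unused.
 */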
static int mvneta_bm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct mvneta_bm *priv;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	err = clk_prepare_enable(priv->clk);
	if (err < 0)
		return err;

	err = mvneta_bm_get_sram(dn, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to allocate internal memory\n");
		goto err_clk;
	}

	priv->pdev = pdev;

	/* Initialize buffer manager internals */
	err = mvneta_bm_init(priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_sram;
	}

	dn->data = priv;
	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");

	return 0;

err_sram:
	mvneta_bm_put_sram(priv);
err_clk:
	clk_disable_unprepare(priv->clk);
	return err;
}

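/* Remove: destroy every pool on behalf of all possible ports, release
 * the BPPI SRAM, stop the BM unit and disable its clock.
 */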
static int mvneta_bm_remove(struct platform_device *pdev)
{
	struct mvneta_bm *priv = platform_get_drvdata(pdev);
	u8 all_ports_map = 0xff;
	int i;

	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];

		mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
	}

	mvneta_bm_put_sram(priv);

	/* Deactivate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id mvneta_bm_match[] = {
	{ .compatible = "marvell,armada-380-neta-bm" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_bm_match);

static struct platform_driver mvneta_bm_driver = {
	.probe = mvneta_bm_probe,
	.remove = mvneta_bm_remove,
	.driver = {
		.name = MVNETA_BM_DRIVER_NAME,
		.of_match_table = mvneta_bm_match,
	},
};

module_platform_driver(mvneta_bm_driver);

MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");