/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats);

/* PBL */
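/*
 * A bnxt_qplib_pbl describes one level of a queue's page table:
 * pg_arr[] holds the kernel virtual address of each page (NULL for
 * user-owned pages) and pg_map_arr[] the corresponding DMA address.
 */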
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	vfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

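/*
 * Populate a PBL from a user umem: only the DMA address of each block is
 * recorded, and pg_arr[] stays NULL so that __free_pbl(..., is_umem = true)
 * does not try to dma_free_coherent() pages the driver never allocated.
 */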
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
					   struct bnxt_qplib_sg_info *sginfo)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
		pbl->pg_arr[i] = NULL;
		pbl->pg_count++;
		i++;
	}
}

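/*
 * Allocate one PBL level. Kernel-owned queues get one coherent DMA page
 * per entry; user-owned queues are filled from the umem instead. When
 * sginfo->nopte is set the caller does not want a PTE level materialised,
 * so there is nothing to allocate here.
 */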
static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	bool is_umem = false;
	u32 pages;
	int i;

	if (sginfo->nopte)
		return 0;
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;
	/* page ptr arrays */
	pbl->pg_arr = vmalloc(pages * sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sginfo->umem) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}

/* HWQ */
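/*
 * A HWQ (hardware queue) is addressed through up to three PBL levels:
 * pbl[PBL_LVL_0] is the top level (a PDE or PBL page, or the queue
 * itself for a single-page queue), and each level below holds the
 * pages that the level above points at. hwq->level records the deepest
 * level in use.
 */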
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(res, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */

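/*
 * Minimal caller sketch, mirroring bnxt_qplib_alloc_tqm_rings() below
 * (the 512/sizeof(u64) values are just that caller's choices):
 *
 *	struct bnxt_qplib_hwq_attr hwq_attr = {};
 *	struct bnxt_qplib_sg_info sginfo = {};
 *	struct bnxt_qplib_hwq hwq = {};
 *	int rc;
 *
 *	sginfo.pgsize = PAGE_SIZE;
 *	sginfo.pgshft = PAGE_SHIFT;
 *	hwq_attr.res = res;
 *	hwq_attr.sginfo = &sginfo;
 *	hwq_attr.type = HWQ_TYPE_CTX;
 *	hwq_attr.depth = 512;
 *	hwq_attr.stride = sizeof(u64);
 *	rc = bnxt_qplib_alloc_init_hwq(&hwq, &hwq_attr);
 *
 * Sizing example: depth = 1024, stride = 64 and pgsize = 4096 give
 * npages = (1024 * 64) / 4096 = 16, which takes the single-level path
 * below: one PBL page whose PTEs point at the 16 queue pages.
 */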
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr)
{
	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	u32 depth, stride, npbl, npde;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct bnxt_qplib_res *res;
	struct pci_dev *pdev;
	int i, rc, lvl;

	res = hwq_attr->res;
	pdev = res->pdev;
	pg_size = hwq_attr->sginfo->pgsize;
	hwq->level = PBL_LVL_MAX;

	depth = roundup_pow_of_two(hwq_attr->depth);
	stride = roundup_pow_of_two(hwq_attr->stride);
	if (hwq_attr->aux_depth) {
		aux_slots = hwq_attr->aux_depth;
		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
		aux_pages = (aux_slots * aux_size) / pg_size;
		if ((aux_slots * aux_size) % pg_size)
			aux_pages++;
	}

	if (!hwq_attr->sginfo->umem) {
		hwq->is_user = false;
		npages = (depth * stride) / pg_size + aux_pages;
		if ((depth * stride) % pg_size)
			npages++;
		if (!npages)
			return -EINVAL;
		hwq_attr->sginfo->npages = npages;
	} else {
		unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
			hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);

		hwq->is_user = true;
		npages = sginfo_num_pages;
		npages = (npages * PAGE_SIZE) /
			  BIT_ULL(hwq_attr->sginfo->pgshft);
		if ((sginfo_num_pages * PAGE_SIZE) %
		     BIT_ULL(hwq_attr->sginfo->pgshft))
			if (!npages)
				npages++;
	}

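	/*
	 * Choose the indirection depth from the page count: exactly one
	 * page needs no indirection (level 0), up to MAX_PBL_LVL_1_PGS
	 * pages need one PBL page (level 1), and anything larger needs a
	 * PDE level on top of the PBLs (level 2).
	 */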
	if (npages == MAX_PBL_LVL_0_PGS) {
		/* This request is Level 0, map PTE */
		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
		if (rc)
			goto fail;
		hwq->level = PBL_LVL_0;
	}

	if (npages > MAX_PBL_LVL_0_PGS) {
		if (npages > MAX_PBL_LVL_1_PGS) {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;
			/* 2 levels of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			npde = npbl >> MAX_PDL_LVL_SHIFT;
			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
				npde++;
			/* Alloc PDE pages */
			sginfo.pgsize = npde * pg_size;
			sginfo.npages = 1;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc PBL pages */
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
			if (rc)
				goto fail;
			/* Fill PDL with PBL page pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			if (hwq_attr->type == HWQ_TYPE_MR) {
				/* For MR it is expected that we supply only
				 * 1 contiguous page i.e only 1 entry in the
				 * PDL that will contain all the PBLs for the
				 * user supplied memory region
				 */
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[0][i] = src_phys_ptr[i] |
						flag;
			} else {
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
						src_phys_ptr[i] |
						PTU_PDE_VALID;
			}
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_2;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBLs with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		} else { /* pages < 512 npbl = 1, npde = 0 */
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 1 level of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			/* Alloc PBL page */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_1;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBL with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		}
	}
done:
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->pdev = pdev;
	hwq->depth = hwq_attr->depth;
	hwq->max_elements = depth;
	hwq->element_size = stride;
	hwq->qe_ppg = pg_size / stride;
	/* For direct access to the elements */
	lvl = hwq->level;
	if (hwq_attr->sginfo->nopte && hwq->level)
		lvl = hwq->level - 1;
	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
	spin_lock_init(&hwq->lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, hwq);
	return -ENOMEM;
}

/* Context Tables */
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
	/* restore original pde level before destroy */
	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}

static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_tqm_ctx *tqmctx;
	int rc = 0;
	int i;

	tqmctx = &ctx->tqm_ctx;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;
	hwq_attr.depth = 512;
	hwq_attr.stride = sizeof(u64);
	/* Alloc pdl buffer */
	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
	if (rc)
		goto out;
	/* Save original pdl level */
	tqmctx->pde_level = tqmctx->pde.level;

	hwq_attr.stride = 1;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!tqmctx->qcount[i])
			continue;
		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
		if (rc)
			goto out;
	}
out:
	return rc;
}

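/*
 * Program the TQM PDE page with the page tables of the individual TQM
 * rings. Each ring owns a fixed window of MAX_TQM_ALLOC_BLK_SIZE slots
 * at offset i * MAX_TQM_ALLOC_BLK_SIZE; level-2 rings copy in all of
 * their PBL page addresses, smaller rings just their level-0 page.
 */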
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}

static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	int rc = 0;

	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
	return rc;
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories which are used by the chip fw.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the tbl requested, either a 1 Page Buffer List
 *     or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
 *     instead.
 *     Table might be employed as follows:
 *             For 0      < ctx size <= 1 PAGE, 0 level of ind is used
 *             For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used
 *             For 512    < ctx size <= MAX, 2 levels of ind is used
 * Returns:
 *     0 if success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc = 0;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}

/* GUID */
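/*
 * MAC-48 to EUI-64: flip the universal/local bit of the first octet and
 * insert 0xff, 0xfe in the middle. For example, 00:0a:f7:12:34:56 maps
 * to the GUID 02:0a:f7:ff:fe:12:34:56.
 */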
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
	u8 mac[ETH_ALEN];

	/* MAC-48 to EUI-64 mapping */
	memcpy(mac, dev_addr, ETH_ALEN);
	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	if (!pkey_tbl->tbl)
		dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
	else
		kfree(pkey_tbl->tbl);

	pkey_tbl->tbl = NULL;
	pkey_tbl->max = 0;
	pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
				     u16 max)
{
	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!pkey_tbl->tbl)
		return -ENOMEM;

	pkey_tbl->max = max;
	return 0;
}

/* PDs */
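/*
 * The PD table is a plain bitmap in which a set bit marks a free id:
 * it is initialised to all ones, allocation clears the first set bit,
 * and deallocation sets it again.
 */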
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}

/* DPIs */
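/*
 * Each DPI is one PAGE_SIZE doorbell page carved out of the DBR BAR
 * region: dbr is the kernel's ioremapped view of the page, and umdbr
 * the raw bus address (used when the page is mapped to user space).
 */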
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
					  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	dpit->dbr_bar_reg_iomem = NULL;
	return -ENOMEM;
}

/* PKEYs */
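/*
 * Only the default pkey 0xFFFF is ever programmed into the table here;
 * see bnxt_qplib_init_pkey_tbl() below.
 */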
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
	pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	u16 pkey = 0xFFFF;

	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

	/* pkey default = 0xFFFF */
	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
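/*
 * The stats context is a single coherent DMA buffer whose size comes
 * from the chip context; an fw_id of -1 marks it as unallocated.
 */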
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = cctx->hw_stats_size;
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}