// SPDX-License-Identifier: GPL-2.0-only
/*
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Miscellaneous functionality used in the other GenWQE driver parts.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/pgtable.h>

#include "genwqe_driver.h"
#include "card_base.h"
#include "card_ddcb.h"

/**
 * __genwqe_writeq() - Write 64-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: byte offset within BAR
 * @val: 64-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
{
        struct pci_dev *pci_dev = cd->pci_dev;

        if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
                return -EIO;

        if (cd->mmio == NULL)
                return -EIO;

        if (pci_channel_offline(pci_dev))
                return -EIO;

        __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
        return 0;
}

/**
 * __genwqe_readq() - Read 64-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: offset within BAR
 *
 * Return: value from register
 */
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
{
        if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
                return 0xffffffffffffffffull;

        if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
            (byte_offs == IO_SLC_CFGREG_GFIR))
                return 0x000000000000ffffull;

        if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
            (byte_offs == IO_SLC_CFGREG_GFIR))
                return 0x00000000ffff0000ull;

        if (cd->mmio == NULL)
                return 0xffffffffffffffffull;

        return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
}

/**
 * __genwqe_writel() - Write 32-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: byte offset within BAR
 * @val: 32-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
{
        struct pci_dev *pci_dev = cd->pci_dev;

        if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
                return -EIO;

        if (cd->mmio == NULL)
                return -EIO;

        if (pci_channel_offline(pci_dev))
                return -EIO;

        __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
        return 0;
}

/**
 * __genwqe_readl() - Read 32-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: offset within BAR
 *
 * Return: Value from register
 */
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
{
        if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
                return 0xffffffff;

        if (cd->mmio == NULL)
                return 0xffffffff;

        return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
}

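/*
 * Illustrative usage (a sketch, not part of the upstream driver): the
 * accessors above deliberately return all-ones when the card is gone,
 * so a caller can probe for a vanished device by reading GFIR. The
 * helper name below is hypothetical.
 */
static int __maybe_unused genwqe_check_alive_example(struct genwqe_dev *cd)
{
        u64 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);

        /* all-ones means the MMIO space is unreachable */
        return (gfir == 0xffffffffffffffffull) ? -EIO : 0;
}
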
/**
 * genwqe_read_app_id() - Extract app_id
 * @cd: genwqe device descriptor
 * @app_name: carrier used to pass-back name
 * @len: length of data for name
 *
 * app_unitcfg needs to be filled with valid data first.
 */
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
{
        int i, j;
        u32 app_id = (u32)cd->app_unitcfg;

        memset(app_name, 0, len);
        for (i = 0, j = 0; j < min(len, 4); j++) {
                char ch = (char)((app_id >> (24 - j*8)) & 0xff);

                if (ch == ' ')
                        continue;
                app_name[i++] = isprint(ch) ? ch : 'X';
        }
        return i;
}

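/*
 * Illustrative sketch (not part of the upstream driver): how the
 * big-endian byte extraction above decodes the low 32 bits of
 * app_unitcfg. The value 0x475a4950 is a made-up example; it decodes
 * to "GZIP".
 */
static void __maybe_unused genwqe_app_id_example(void)
{
        u32 app_id = 0x475a4950;	/* hypothetical app_unitcfg[31:0] */
        char name[5] = { 0 };
        int j;

        for (j = 0; j < 4; j++)
                name[j] = (char)((app_id >> (24 - j * 8)) & 0xff);
        /* name now holds "GZIP"; the driver maps non-printables to 'X' */
}
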
/**
 * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
 *
 * Existing kernel functions seem to use a different polynomial,
 * therefore we could not use them here.
 *
 * Genwqe's Polynomial = 0x20044009
 */
#define CRC32_POLYNOMIAL	0x20044009
static u32 crc32_tab[256];	/* crc32 lookup table */

void genwqe_init_crc32(void)
{
        int i, j;
        u32 crc;

        for (i = 0; i < 256; i++) {
                crc = i << 24;
                for (j = 0; j < 8; j++) {
                        if (crc & 0x80000000)
                                crc = (crc << 1) ^ CRC32_POLYNOMIAL;
                        else
                                crc = (crc << 1);
                }
                crc32_tab[i] = crc;
        }
}

/**
 * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
 * @buff: pointer to data buffer
 * @len: length of data for calculation
 * @init: initial crc (0xffffffff at start)
 *
 * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
 *
 * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
 * result in a crc32 of 0xf33cb7d3.
 *
 * The existing kernel crc functions did not cover this polynomial yet.
 *
 * Return: crc32 checksum.
 */
u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
{
        int i;
        u32 crc;

        crc = init;
        while (len--) {
                i = ((crc >> 24) ^ *buff++) & 0xFF;
                crc = (crc << 8) ^ crc32_tab[i];
        }
        return crc;
}

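/*
 * Illustrative sketch (not part of the upstream driver): check the
 * test vector documented above. genwqe_init_crc32() must have been
 * called first; the helper name genwqe_crc32_selftest is hypothetical.
 */
static int __maybe_unused genwqe_crc32_selftest(void)
{
        u8 data[4] = { 0x01, 0x02, 0x03, 0x04 };

        return genwqe_crc32(data, sizeof(data), 0xffffffff) == 0xf33cb7d3 ?
                0 : -EINVAL;
}
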
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
                                dma_addr_t *dma_handle)
{
        if (get_order(size) >= MAX_ORDER)
                return NULL;

        return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
                                  GFP_KERNEL);
}

void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
                              void *vaddr, dma_addr_t dma_handle)
{
        if (vaddr == NULL)
                return;

        dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
}

static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
                               int num_pages)
{
        int i;
        struct pci_dev *pci_dev = cd->pci_dev;

        for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
                pci_unmap_page(pci_dev, dma_list[i],
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                dma_list[i] = 0x0;
        }
}

static int genwqe_map_pages(struct genwqe_dev *cd,
                            struct page **page_list, int num_pages,
                            dma_addr_t *dma_list)
{
        int i;
        struct pci_dev *pci_dev = cd->pci_dev;

        /* establish DMA mapping for requested pages */
        for (i = 0; i < num_pages; i++) {
                dma_addr_t daddr;

                dma_list[i] = 0x0;
                daddr = pci_map_page(pci_dev, page_list[i],
                                     0,	/* map_offs */
                                     PAGE_SIZE,
                                     PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */

                if (pci_dma_mapping_error(pci_dev, daddr)) {
                        dev_err(&pci_dev->dev,
                                "[%s] err: no dma addr daddr=%016llx!\n",
                                __func__, (long long)daddr);
                        goto err;
                }

                dma_list[i] = daddr;
        }
        return 0;

 err:
        genwqe_unmap_pages(cd, dma_list, num_pages);
        return -EIO;
}

static int genwqe_sgl_size(int num_pages)
{
        int len, num_tlb = num_pages / 7;

        len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
        return roundup(len, PAGE_SIZE);
}

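/*
 * Worked example for the sizing above (a sketch, assuming
 * sizeof(struct sg_entry) == 16 and PAGE_SIZE == 4096): num_pages = 14
 * gives num_tlb = 2, so len = 16 * (14 + 2 + 1) = 272 bytes, which
 * roundup() grows to one full page. One chaining entry is reserved per
 * 7 data entries, plus one terminating entry.
 */
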
/*
 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
 *
 * Allocates memory for sgl and overlapping pages. Pages which might
 * overlap other user-space memory blocks are being cached for DMAs,
 * such that we do not run into synchronization issues. Data is copied
 * from user-space into the cached pages.
 */
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                          void __user *user_addr, size_t user_size, int write)
{
        int ret = -ENOMEM;
        struct pci_dev *pci_dev = cd->pci_dev;

        sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
        sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
        sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
        sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;

        dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
                __func__, user_addr, user_size, sgl->nr_pages,
                sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);

        sgl->user_addr = user_addr;
        sgl->user_size = user_size;
        sgl->write = write;
        sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);

        if (get_order(sgl->sgl_size) > MAX_ORDER) {
                dev_err(&pci_dev->dev,
                        "[%s] err: too much memory requested!\n", __func__);
                return ret;
        }

        sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
                                             &sgl->sgl_dma_addr);
        if (sgl->sgl == NULL) {
                dev_err(&pci_dev->dev,
                        "[%s] err: no memory available!\n", __func__);
                return ret;
        }

        /* Only use buffering on incomplete pages */
        if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
                sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
                                                       &sgl->fpage_dma_addr);
                if (sgl->fpage == NULL)
                        goto err_out;

                /* Sync with user memory */
                if (copy_from_user(sgl->fpage + sgl->fpage_offs,
                                   user_addr, sgl->fpage_size)) {
                        ret = -EFAULT;
                        goto err_out;
                }
        }
        if (sgl->lpage_size != 0) {
                sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
                                                       &sgl->lpage_dma_addr);
                if (sgl->lpage == NULL)
                        goto err_out1;

                /* Sync with user memory */
                if (copy_from_user(sgl->lpage, user_addr + user_size -
                                   sgl->lpage_size, sgl->lpage_size)) {
                        ret = -EFAULT;
                        goto err_out2;
                }
        }
        return 0;

 err_out2:
        __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
                                 sgl->lpage_dma_addr);
        sgl->lpage = NULL;
        sgl->lpage_dma_addr = 0;
 err_out1:
        __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
                                 sgl->fpage_dma_addr);
        sgl->fpage = NULL;
        sgl->fpage_dma_addr = 0;
 err_out:
        __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
                                 sgl->sgl_dma_addr);
        sgl->sgl = NULL;
        sgl->sgl_dma_addr = 0;
        sgl->sgl_size = 0;

        return ret;
}

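/*
 * Worked example for the split above (a sketch, PAGE_SIZE == 4096):
 * a user buffer starting at page offset 0x100 with user_size = 0x2100
 * yields fpage_offs = 0x100, fpage_size = 0xf00, nr_pages = 3 and
 * lpage_size = 0x200. Only the partial first and last pages are
 * bounced through coherent buffers; full middle pages are DMA-mapped
 * directly.
 */
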
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                     dma_addr_t *dma_list)
{
        int i = 0, j = 0, p;
        unsigned long dma_offs, map_offs;
        dma_addr_t prev_daddr = 0;
        struct sg_entry *s, *last_s = NULL;
        size_t size = sgl->user_size;

        dma_offs = 128;		/* next block if needed/dma_offset */
        map_offs = sgl->fpage_offs; /* offset in first page */

        s = &sgl->sgl[0];	/* first set of 8 entries */
        p = 0;			/* page */
        while (p < sgl->nr_pages) {
                dma_addr_t daddr;
                unsigned int size_to_map;

                /* always write the chaining entry, cleanup is done later */
                j = 0;
                s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
                s[j].len = cpu_to_be32(128);
                s[j].flags = cpu_to_be32(SG_CHAINED);
                j++;

                while (j < 8) {
                        /* DMA mapping for requested page, offs, size */
                        size_to_map = min(size, PAGE_SIZE - map_offs);

                        if ((p == 0) && (sgl->fpage != NULL)) {
                                daddr = sgl->fpage_dma_addr + map_offs;

                        } else if ((p == sgl->nr_pages - 1) &&
                                   (sgl->lpage != NULL)) {
                                daddr = sgl->lpage_dma_addr;
                        } else {
                                daddr = dma_list[p] + map_offs;
                        }

                        size -= size_to_map;
                        map_offs = 0;

                        if (prev_daddr == daddr) {
                                u32 prev_len = be32_to_cpu(last_s->len);

                                /* pr_info("daddr combining: "
                                   "%016llx/%08x -> %016llx\n",
                                   prev_daddr, prev_len, daddr); */

                                last_s->len = cpu_to_be32(prev_len +
                                                          size_to_map);

                                p++; /* process next page */
                                if (p == sgl->nr_pages)
                                        goto fixup;  /* nothing to do */

                                prev_daddr = daddr + size_to_map;
                                continue;
                        }

                        /* start new entry */
                        s[j].target_addr = cpu_to_be64(daddr);
                        s[j].len = cpu_to_be32(size_to_map);
                        s[j].flags = cpu_to_be32(SG_DATA);
                        prev_daddr = daddr + size_to_map;
                        last_s = &s[j];
                        j++;

                        p++; /* process next page */
                        if (p == sgl->nr_pages)
                                goto fixup;  /* nothing to do */
                }
                dma_offs += 128;
                s += 8;		/* continue 8 elements further */
        }
 fixup:
        if (j == 1) {		/* combining happened on last entry! */
                s -= 8;		/* full shift needed on previous sgl block */
                j = 7;		/* shift all elements */
        }

        for (i = 0; i < j; i++)	/* move elements 1 up */
                s[i] = s[i + 1];

        s[i].target_addr = cpu_to_be64(0);
        s[i].len = cpu_to_be32(0);
        s[i].flags = cpu_to_be32(SG_END_LIST);
        return 0;
}

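/*
 * Sketch of the list layout built above (illustrative): entries come
 * in 128-byte blocks of 8 struct sg_entry each. Entry 0 of every block
 * is an SG_CHAINED pointer to the next block, entries 1..7 are SG_DATA,
 * and the fixup code shifts the used entries up by one so the list
 * ends with a single SG_END_LIST terminator.
 */
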
/**
 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
 * @cd: genwqe device descriptor
 * @sgl: scatter gather list describing user-space memory
 *
 * After the DMA transfer has been completed we free the memory for
 * the sgl and the cached pages. Data is being transferred from cached
 * pages into user-space buffers.
 */
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
        int rc = 0;
        size_t offset;
        unsigned long res;
        struct pci_dev *pci_dev = cd->pci_dev;

        if (sgl->fpage) {
                if (sgl->write) {
                        res = copy_to_user(sgl->user_addr,
                                sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
                        if (res) {
                                dev_err(&pci_dev->dev,
                                        "[%s] err: copying fpage! (res=%lu)\n",
                                        __func__, res);
                                rc = -EFAULT;
                        }
                }
                __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
                                         sgl->fpage_dma_addr);
                sgl->fpage = NULL;
                sgl->fpage_dma_addr = 0;
        }
        if (sgl->lpage) {
                if (sgl->write) {
                        offset = sgl->user_size - sgl->lpage_size;
                        res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
                                           sgl->lpage_size);
                        if (res) {
                                dev_err(&pci_dev->dev,
                                        "[%s] err: copying lpage! (res=%lu)\n",
                                        __func__, res);
                                rc = -EFAULT;
                        }
                }
                __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
                                         sgl->lpage_dma_addr);
                sgl->lpage = NULL;
                sgl->lpage_dma_addr = 0;
        }
        __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
                                 sgl->sgl_dma_addr);

        sgl->sgl = NULL;
        sgl->sgl_dma_addr = 0x0;
        sgl->sgl_size = 0;
        return rc;
}

/**
 * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
 * @cd: pointer to genwqe device
 * @m: mapping params
 * @uaddr: user virtual address
 * @size: size of memory to be mapped
 *
 * We need to think about how we could speed this up. Of course it is
 * not a good idea to do this over and over again, like we are
 * currently doing it. Nevertheless, I am curious where on the path
 * the performance is spent. Most probably within the memory
 * allocation functions, but maybe also in the DMA mapping code.
 *
 * Restrictions: The maximum size of the possible mapping currently depends
 *               on the amount of memory we can get using kzalloc() for the
 *               page_list and dma_alloc_coherent for the sg_list.
 *               The sg_list is currently itself not scattered, which could
 *               be fixed with some effort. The page_list must be split into
 *               PAGE_SIZE chunks too. All that will make the complicated
 *               code more complicated.
 *
 * Return: 0 if success
 */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
                     unsigned long size)
{
        int rc = -EINVAL;
        unsigned long data, offs;
        struct pci_dev *pci_dev = cd->pci_dev;

        if ((uaddr == NULL) || (size == 0)) {
                m->size = 0; /* mark unused and not added */
                return -EINVAL;
        }
        m->u_vaddr = uaddr;
        m->size = size;

        /* determine space needed for page_list. */
        data = (unsigned long)uaddr;
        offs = offset_in_page(data);
        if (size > ULONG_MAX - PAGE_SIZE - offs) {
                m->size = 0; /* mark unused and not added */
                return -EINVAL;
        }
        m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);

        m->page_list = kcalloc(m->nr_pages,
                               sizeof(struct page *) + sizeof(dma_addr_t),
                               GFP_KERNEL);
        if (!m->page_list) {
                dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
                m->nr_pages = 0;
                m->u_vaddr = NULL;
                m->size = 0; /* mark unused and not added */
                return -ENOMEM;
        }
        m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);

        /* pin user pages in memory */
        rc = pin_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
                                 m->nr_pages,
                                 m->write ? FOLL_WRITE : 0, /* readable/writable */
                                 m->page_list);	/* ptrs to pages */
        if (rc < 0)
                goto fail_pin_user_pages;

        /* assumption: pin_user_pages can be killed by signals. */
        if (rc < m->nr_pages) {
                unpin_user_pages_dirty_lock(m->page_list, rc, m->write);
                rc = -EFAULT;
                goto fail_pin_user_pages;
        }

        rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
        if (rc != 0)
                goto fail_free_user_pages;

        return 0;

 fail_free_user_pages:
        unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write);

 fail_pin_user_pages:
        kfree(m->page_list);
        m->page_list = NULL;
        m->dma_list = NULL;
        m->nr_pages = 0;
        m->u_vaddr = NULL;
        m->size = 0; /* mark unused and not added */
        return rc;
}

/**
 * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel
 *                        memory
 * @cd: pointer to genwqe device
 * @m: mapping params
 */
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
{
        struct pci_dev *pci_dev = cd->pci_dev;

        if (!dma_mapping_used(m)) {
                dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
                        __func__, m);
                return -EINVAL;
        }

        if (m->dma_list)
                genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);

        if (m->page_list) {
                unpin_user_pages_dirty_lock(m->page_list, m->nr_pages,
                                            m->write);
                kfree(m->page_list);
                m->page_list = NULL;
                m->dma_list = NULL;
                m->nr_pages = 0;
        }

        m->u_vaddr = NULL;
        m->size = 0; /* mark as unused and not added */
        return 0;
}

/**
 * genwqe_card_type() - Get chip type from SLU Configuration Register
 * @cd: pointer to the genwqe device descriptor
 * Return: 0: Altera Stratix-IV 230
 *         1: Altera Stratix-IV 530
 *         2: Altera Stratix-V A4
 *         3: Altera Stratix-V A7
 */
u8 genwqe_card_type(struct genwqe_dev *cd)
{
        u64 card_type = cd->slu_unitcfg;

        return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
}

/**
 * genwqe_card_reset() - Reset the card
 * @cd: pointer to the genwqe device descriptor
 */
int genwqe_card_reset(struct genwqe_dev *cd)
{
        u64 softrst;
        struct pci_dev *pci_dev = cd->pci_dev;

        if (!genwqe_is_privileged(cd))
                return -ENODEV;

        /* new SL */
        __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
        msleep(1000);
        __genwqe_readq(cd, IO_HSU_FIR_CLR);
        __genwqe_readq(cd, IO_APP_FIR_CLR);
        __genwqe_readq(cd, IO_SLU_FIR_CLR);

        /*
         * Read-modify-write to preserve the stealth bits
         *
         * For SL >= 039, the Stealth WE bit allows removing
         * the read-modify-write.
         * r-m-w may require a mask 0x3C to avoid hitting hard
         * reset again for error reset (should be 0, chicken).
         */
        softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
        __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);

        /* give ERRORRESET some time to finish */
        msleep(50);

        if (genwqe_need_err_masking(cd)) {
                dev_info(&pci_dev->dev,
                         "[%s] masking errors for old bitstreams\n", __func__);
                __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
        }
        return 0;
}

int genwqe_read_softreset(struct genwqe_dev *cd)
{
        u64 bitstream;

        if (!genwqe_is_privileged(cd))
                return -ENODEV;

        bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
        cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
        return 0;
}

/**
 * genwqe_set_interrupt_capability() - Configure MSI capability structure
 * @cd: pointer to the device
 * @count: number of vectors to allocate
 * Return: 0 if no error
 */
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
        int rc;

        rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
        if (rc < 0)
                return rc;
        return 0;
}

/**
 * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
 * @cd: pointer to the device
 */
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
        pci_free_irq_vectors(cd->pci_dev);
}

/**
 * set_reg_idx() - Fill array with data. Ignore illegal offsets.
 * @cd: card device
 * @r: debug register array
 * @i: index to desired entry
 * @m: maximum possible entries
 * @addr: addr which is read
 * @idx: index in debug array
 * @val: read value
 */
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
                       unsigned int *i, unsigned int m, u32 addr, u32 idx,
                       u64 val)
{
        if (WARN_ON_ONCE(*i >= m))
                return -EFAULT;

        r[*i].addr = addr;
        r[*i].idx = idx;
        r[*i].val = val;
        ++*i;
        return 0;
}

static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
                   unsigned int *i, unsigned int m, u32 addr, u64 val)
{
        return set_reg_idx(cd, r, i, m, addr, 0, val);
}

int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
                          unsigned int max_regs, int all)
{
        unsigned int i, j, idx = 0;
        u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
        u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;

        /* Global FIR */
        gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
        set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);

        /* UnitCfg for SLU */
        sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
        set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);

        /* UnitCfg for APP */
        appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
        set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);

        /* Check all chip Units */
        for (i = 0; i < GENWQE_MAX_UNITS; i++) {

                /* Unit FIR */
                ufir_addr = (i << 24) | 0x008;
                ufir = __genwqe_readq(cd, ufir_addr);
                set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);

                /* Unit FEC */
                ufec_addr = (i << 24) | 0x018;
                ufec = __genwqe_readq(cd, ufec_addr);
                set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);

                for (j = 0; j < 64; j++) {
                        /* wherever there is a primary 1, read the 2ndary */
                        if (!all && (!(ufir & (1ull << j))))
                                continue;

                        sfir_addr = (i << 24) | (0x100 + 8 * j);
                        sfir = __genwqe_readq(cd, sfir_addr);
                        set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);

                        sfec_addr = (i << 24) | (0x300 + 8 * j);
                        sfec = __genwqe_readq(cd, sfec_addr);
                        set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
                }
        }

        /* fill with invalid data until end */
        for (i = idx; i < max_regs; i++) {
                regs[i].addr = 0xffffffff;
                regs[i].val = 0xffffffffffffffffull;
        }
        return idx;
}

/**
 * genwqe_ffdc_buff_size() - Calculates the number of dump registers
 * @cd: genwqe device descriptor
 * @uid: unit ID
 */
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
        int entries = 0, ring, traps, traces, trace_entries;
        u32 eevptr_addr, l_addr, d_len, d_type;
        u64 eevptr, val, addr;

        eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
        eevptr = __genwqe_readq(cd, eevptr_addr);

        if ((eevptr != 0x0) && (eevptr != -1ull)) {
                l_addr = GENWQE_UID_OFFS(uid) | eevptr;

                while (1) {
                        val = __genwqe_readq(cd, l_addr);

                        if ((val == 0x0) || (val == -1ull))
                                break;

                        /* 38:24 */
                        d_len = (val & 0x0000007fff000000ull) >> 24;

                        /* 39 */
                        d_type = (val & 0x0000008000000000ull) >> 36;

                        if (d_type) {	/* repeat */
                                entries += d_len;
                        } else {	/* size in bytes! */
                                entries += d_len >> 3;
                        }

                        l_addr += 8;
                }
        }

        for (ring = 0; ring < 8; ring++) {
                addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
                val = __genwqe_readq(cd, addr);

                if ((val == 0x0ull) || (val == -1ull))
                        continue;

                traps = (val >> 24) & 0xff;
                traces = (val >> 16) & 0xff;
                trace_entries = val & 0xffff;

                entries += traps + (traces * trace_entries);
        }
        return entries;
}

/**
 * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
 * @cd: genwqe device descriptor
 * @uid: unit ID
 * @regs: register information
 * @max_regs: number of register entries
 */
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
                          struct genwqe_reg *regs, unsigned int max_regs)
{
        int i, traps, traces, trace, trace_entries, trace_entry, ring;
        unsigned int idx = 0;
        u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
        u64 eevptr, e, val, addr;

        eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
        eevptr = __genwqe_readq(cd, eevptr_addr);

        if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
                l_addr = GENWQE_UID_OFFS(uid) | eevptr;
                while (1) {
                        e = __genwqe_readq(cd, l_addr);
                        if ((e == 0x0) || (e == 0xffffffffffffffffull))
                                break;

                        d_addr = (e & 0x0000000000ffffffull);	    /* 23:0 */
                        d_len  = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
                        d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
                        d_addr |= GENWQE_UID_OFFS(uid);

                        if (d_type) {
                                for (i = 0; i < (int)d_len; i++) {
                                        val = __genwqe_readq(cd, d_addr);
                                        set_reg_idx(cd, regs, &idx, max_regs,
                                                    d_addr, i, val);
                                }
                        } else {
                                d_len >>= 3; /* Size in bytes! */
                                for (i = 0; i < (int)d_len; i++, d_addr += 8) {
                                        val = __genwqe_readq(cd, d_addr);
                                        set_reg_idx(cd, regs, &idx, max_regs,
                                                    d_addr, 0, val);
                                }
                        }
                        l_addr += 8;
                }
        }

        /*
         * To save time, there are only 6 traces populated on Uid=2,
         * Ring=1, each with iters=512.
         */
        for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
                                              2...7 are ASI rings */
                addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
                val = __genwqe_readq(cd, addr);

                if ((val == 0x0ull) || (val == -1ull))
                        continue;

                traps = (val >> 24) & 0xff;	/* Number of Traps */
                traces = (val >> 16) & 0xff;	/* Number of Traces */
                trace_entries = val & 0xffff;	/* Entries per trace */

                /* Note: This is a combined loop that dumps both the traps */
                /* (for the trace == 0 case) as well as the traces 1 to */
                /* 'traces'. */
                for (trace = 0; trace <= traces; trace++) {
                        u32 diag_sel =
                                GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);

                        addr = (GENWQE_UID_OFFS(uid) |
                                IO_EXTENDED_DIAG_SELECTOR);
                        __genwqe_writeq(cd, addr, diag_sel);

                        for (trace_entry = 0;
                             trace_entry < (trace ? trace_entries : traps);
                             trace_entry++) {
                                addr = (GENWQE_UID_OFFS(uid) |
                                        IO_EXTENDED_DIAG_READ_MBX);
                                val = __genwqe_readq(cd, addr);
                                set_reg_idx(cd, regs, &idx, max_regs, addr,
                                            (diag_sel<<16) | trace_entry, val);
                        }
                }
        }
        return 0;
}

969*4882a593Smuzhiyun /**
970*4882a593Smuzhiyun * genwqe_write_vreg() - Write register in virtual window
971*4882a593Smuzhiyun * @cd: genwqe device descriptor
972*4882a593Smuzhiyun * @reg: register (byte) offset within BAR
973*4882a593Smuzhiyun * @val: value to write
974*4882a593Smuzhiyun * @func: PCI virtual function
975*4882a593Smuzhiyun *
976*4882a593Smuzhiyun * Note, these registers are only accessible to the PF through the
977*4882a593Smuzhiyun * VF-window. It is not intended for the VF to access.
978*4882a593Smuzhiyun */
genwqe_write_vreg(struct genwqe_dev * cd,u32 reg,u64 val,int func)979*4882a593Smuzhiyun int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
980*4882a593Smuzhiyun {
981*4882a593Smuzhiyun __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
982*4882a593Smuzhiyun __genwqe_writeq(cd, reg, val);
983*4882a593Smuzhiyun return 0;
984*4882a593Smuzhiyun }
985*4882a593Smuzhiyun
986*4882a593Smuzhiyun /**
987*4882a593Smuzhiyun * genwqe_read_vreg() - Read register in virtual window
988*4882a593Smuzhiyun * @cd: genwqe device descriptor
989*4882a593Smuzhiyun * @reg: register (byte) offset within BAR
990*4882a593Smuzhiyun * @func: PCI virtual function
991*4882a593Smuzhiyun *
992*4882a593Smuzhiyun * Note, these registers are only accessible to the PF through the
993*4882a593Smuzhiyun * VF-window. It is not intended for the VF to access.
994*4882a593Smuzhiyun */
genwqe_read_vreg(struct genwqe_dev * cd,u32 reg,int func)995*4882a593Smuzhiyun u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
996*4882a593Smuzhiyun {
997*4882a593Smuzhiyun __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
998*4882a593Smuzhiyun return __genwqe_readq(cd, reg);
999*4882a593Smuzhiyun }
1000*4882a593Smuzhiyun
/**
 * genwqe_base_clock_frequency() - Determine base clock frequency of the card
 * @cd: genwqe device descriptor
 *
 * Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
 * driver cannot understand new codes and is therefore always a
 * problem. Better is to measure out the value or put the
 * speed/frequency directly into a register which is always a valid
 * value for old as well as for new software.
 *
 * Return: Card clock in MHz
 */
int genwqe_base_clock_frequency(struct genwqe_dev *cd)
{
        u16 speed;			/* MHz  MHz  MHz  MHz */
        static const int speed_grade[] = { 250, 200, 166, 175 };

        speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
        if (speed >= ARRAY_SIZE(speed_grade))
                return 0;	/* illegal value */

        return speed_grade[speed];
}

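/*
 * Example (a sketch): slu_unitcfg bits 31:28 index speed_grade, so a
 * field value of 0x1 yields 200 MHz; any value >= 4 falls outside the
 * table and the function reports 0 (illegal).
 */
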
/**
 * genwqe_stop_traps() - Stop traps
 * @cd: genwqe device descriptor
 *
 * Before reading out the analysis data, we need to stop the traps.
 */
void genwqe_stop_traps(struct genwqe_dev *cd)
{
        __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
}

/**
 * genwqe_start_traps() - Start traps
 * @cd: genwqe device descriptor
 *
 * After having read the data, we can/must enable the traps again.
 */
void genwqe_start_traps(struct genwqe_dev *cd)
{
        __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);

        if (genwqe_need_err_masking(cd))
                __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
}