xref: /OK3568_Linux_fs/kernel/drivers/crypto/cavium/zip/zip_main.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/***********************license start************************************
 * Copyright (c) 2003-2017 Cavium, Inc.
 * All rights reserved.
 *
 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
 *
 * This file is provided under the terms of the Cavium License (see below)
 * or under the terms of GNU General Public License, Version 2, as
 * published by the Free Software Foundation. When using or redistributing
 * this file, you may do so under either license.
 *
 * Cavium License:  Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the following
 * conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 *  * Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 *  * Neither the name of Cavium Inc. nor the names of its contributors may be
 *    used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * This Software, including technical data, may be subject to U.S. export
 * control laws, including the U.S. Export Administration Act and its
 * associated regulations, and may be subject to export or import
 * regulations in other countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
 * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
 * WITH YOU.
 ***********************license end**************************************/

#include "common.h"
#include "zip_crypto.h"

#define DRV_NAME		"ThunderX-ZIP"

static struct zip_device *zip_dev[MAX_ZIP_DEVICES];

static const struct pci_device_id zip_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
	{ 0, }
};

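/*
 * 64-bit CSR accessors for the ZIP block; all ZIP configuration and
 * status registers used below are 64 bits wide.
 */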
void zip_reg_write(u64 val, u64 __iomem *addr)
{
	writeq(val, addr);
}

u64 zip_reg_read(u64 __iomem *addr)
{
	return readq(addr);
}

/*
 * Allocates a new ZIP device structure
 * Returns zip_device pointer or NULL if memory allocation fails
 */
static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
{
	struct zip_device *zip = NULL;
	int idx;

	for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
		if (!zip_dev[idx])
			break;
	}

	/* To ensure that the index is within the limit */
	if (idx < MAX_ZIP_DEVICES)
		zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);

	if (!zip)
		return NULL;

	zip_dev[idx] = zip;
	zip->index = idx;
	return zip;
}

/**
 * zip_get_device - Get ZIP device based on node id of cpu
 *
 * @node: Node id of the current cpu
 * Return: Pointer to Zip device structure
 */
struct zip_device *zip_get_device(int node)
{
	if ((node < MAX_ZIP_DEVICES) && (node >= 0))
		return zip_dev[node];

	zip_err("ZIP device not found for node id %d\n", node);
	return NULL;
}

/**
 * zip_get_node_id - Get the node id of the current cpu
 *
 * Return: Node id of the current cpu
 */
int zip_get_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}

/* Initializes the ZIP h/w sub-system */
static int zip_init_hw(struct zip_device *zip)
{
	union zip_cmd_ctl    cmd_ctl;
	union zip_constants  constants;
	union zip_que_ena    que_ena;
	union zip_quex_map   que_map;
	union zip_que_pri    que_pri;

	union zip_quex_sbuf_addr que_sbuf_addr;
	union zip_quex_sbuf_ctl  que_sbuf_ctl;

	int q = 0;

	/* Enable the ZIP Engine(Core) Clock */
	cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
	cmd_ctl.s.forceclk = 1;
	zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));

	zip_msg("ZIP_CMD_CTL  : 0x%016llx",
		zip_reg_read(zip->reg_base + ZIP_CMD_CTL));

	constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
	zip->depth    = constants.s.depth;
	zip->onfsize  = constants.s.onfsize;
	zip->ctxsize  = constants.s.ctxsize;

	zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
		zip->depth, zip->onfsize, zip->ctxsize);

	/*
	 * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
	 * have the correct buffer pointer and size configured for each
	 * instruction queue.
	 */
	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		que_sbuf_ctl.u_reg64 = 0ull;
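		/* SBUF_CTL[SIZE] is expressed in 8-byte (u64) words */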
		que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
		que_sbuf_ctl.s.inst_be   = 0;
		que_sbuf_ctl.s.stream_id = 0;
		zip_reg_write(que_sbuf_ctl.u_reg64,
			      (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));

		zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
	}

	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));

		spin_lock_init(&zip->iq[q].lock);

		if (zip_cmd_qbuf_alloc(zip, q)) {
			while (q != 0) {
				q--;
				zip_cmd_qbuf_free(zip, q);
			}
			return -ENOMEM;
		}

		/* Initialize tail ptr to head */
		zip->iq[q].sw_tail = zip->iq[q].sw_head;
		zip->iq[q].hw_tail = zip->iq[q].sw_head;

		/* Write the physical addr to register */
		que_sbuf_addr.u_reg64   = 0ull;
		que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
				       ZIP_128B_ALIGN);

		zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
			(u64)que_sbuf_addr.s.ptr);

		zip_reg_write(que_sbuf_addr.u_reg64,
			      (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));

		zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));

		zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
			zip->iq[q].sw_head, zip->iq[q].sw_tail,
			zip->iq[q].hw_tail);
		zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
	}

	/*
	 * Queue-to-ZIP core mapping
	 * If a queue is not mapped to a particular core, it is equivalent to
	 * the ZIP core being disabled.
	 */
	que_ena.u_reg64 = 0x0ull;
	/* Enabling queues based on ZIP_NUM_QUEUES */
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		que_ena.s.ena |= (0x1 << q);
	zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));

	zip_msg("QUE_ENA      : 0x%016llx",
		zip_reg_read(zip->reg_base + ZIP_QUE_ENA));

	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		que_map.u_reg64 = 0ull;
		/* Mapping each queue to two ZIP cores */
		que_map.s.zce = 0x3;
		zip_reg_write(que_map.u_reg64,
			      (zip->reg_base + ZIP_QUEX_MAP(q)));

		zip_msg("QUE_MAP(%d)   : 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
	}

	que_pri.u_reg64 = 0ull;
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
	zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));

	zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));

	return 0;
}

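/*
 * zip_probe - PCI probe callback for a ThunderX ZIP device
 *
 * Allocates a zip_device slot, enables the PCI device, sets 48-bit DMA
 * masks, maps the BAR0 CSR space and initializes the ZIP hardware.
 */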
static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct zip_device *zip = NULL;
	int    err;

	zip = zip_alloc_device(pdev);
	if (!zip)
		return -ENOMEM;

	dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
		 pdev->vendor, pdev->device, dev_to_node(dev));

	pci_set_drvdata(pdev, zip);
	zip->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device");
		goto err_free_device;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for allocations\n");
		goto err_release_regions;
	}

	/* MAP configuration registers */
	zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
	if (!zip->reg_base) {
		dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Initialize ZIP Hardware */
	err = zip_init_hw(zip);
	if (err)
		goto err_release_regions;

	return 0;

err_release_regions:
	if (zip->reg_base)
		iounmap(zip->reg_base);
	pci_release_regions(pdev);

err_disable_device:
	pci_disable_device(pdev);

err_free_device:
	pci_set_drvdata(pdev, NULL);

	/* Remove zip_dev from zip_device list, free the zip_device memory */
	zip_dev[zip->index] = NULL;
	devm_kfree(dev, zip);

	return err;
}

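/*
 * zip_remove - PCI remove callback
 *
 * Resets the ZIP cores, unmaps the CSRs, releases the PCI resources and
 * frees the per-queue command buffers.
 */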
static void zip_remove(struct pci_dev *pdev)
{
	struct zip_device *zip = pci_get_drvdata(pdev);
	union zip_cmd_ctl cmd_ctl;
	int q = 0;

	if (!zip)
		return;

	if (zip->reg_base) {
		cmd_ctl.u_reg64 = 0x0ull;
		cmd_ctl.s.reset = 1;  /* Forces ZIP cores to do reset */
		zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
		iounmap(zip->reg_base);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/*
	 * Free Command Queue buffers. This free should be called for all
	 * the enabled Queues.
	 */
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		zip_cmd_qbuf_free(zip, q);

	pci_set_drvdata(pdev, NULL);
	/* remove zip device from zip device list */
	zip_dev[zip->index] = NULL;
}

/* PCI Sub-System Interface */
static struct pci_driver zip_driver = {
	.name	    =  DRV_NAME,
	.id_table   =  zip_id_table,
	.probe	    =  zip_probe,
	.remove     =  zip_remove,
};

/* Kernel Crypto Subsystem Interface */

static struct crypto_alg zip_comp_deflate = {
	.cra_name		= "deflate",
	.cra_driver_name	= "deflate-cavium",
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct zip_kernel_ctx),
	.cra_priority           = 300,
	.cra_module		= THIS_MODULE,
	.cra_init		= zip_alloc_comp_ctx_deflate,
	.cra_exit		= zip_free_comp_ctx,
	.cra_u			= { .compress = {
		.coa_compress	= zip_comp_compress,
		.coa_decompress	= zip_comp_decompress
		 } }
};

static struct crypto_alg zip_comp_lzs = {
	.cra_name		= "lzs",
	.cra_driver_name	= "lzs-cavium",
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct zip_kernel_ctx),
	.cra_priority           = 300,
	.cra_module		= THIS_MODULE,
	.cra_init		= zip_alloc_comp_ctx_lzs,
	.cra_exit		= zip_free_comp_ctx,
	.cra_u			= { .compress = {
		.coa_compress	= zip_comp_compress,
		.coa_decompress	= zip_comp_decompress
		 } }
};

static struct scomp_alg zip_scomp_deflate = {
	.alloc_ctx		= zip_alloc_scomp_ctx_deflate,
	.free_ctx		= zip_free_scomp_ctx,
	.compress		= zip_scomp_compress,
	.decompress		= zip_scomp_decompress,
	.base			= {
		.cra_name		= "deflate",
		.cra_driver_name	= "deflate-scomp-cavium",
		.cra_module		= THIS_MODULE,
		.cra_priority           = 300,
	}
};

static struct scomp_alg zip_scomp_lzs = {
	.alloc_ctx		= zip_alloc_scomp_ctx_lzs,
	.free_ctx		= zip_free_scomp_ctx,
	.compress		= zip_scomp_compress,
	.decompress		= zip_scomp_decompress,
	.base			= {
		.cra_name		= "lzs",
		.cra_driver_name	= "lzs-scomp-cavium",
		.cra_module		= THIS_MODULE,
		.cra_priority           = 300,
	}
};

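/*
 * Registers the deflate and lzs algorithms, via both the legacy
 * crypto_alg and the scomp interfaces, with the kernel crypto API.
 * A failure at any step unwinds the registrations already done.
 */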
static int zip_register_compression_device(void)
{
	int ret;

	ret = crypto_register_alg(&zip_comp_deflate);
	if (ret < 0) {
		zip_err("Deflate algorithm registration failed\n");
		return ret;
	}

	ret = crypto_register_alg(&zip_comp_lzs);
	if (ret < 0) {
		zip_err("LZS algorithm registration failed\n");
		goto err_unregister_alg_deflate;
	}

	ret = crypto_register_scomp(&zip_scomp_deflate);
	if (ret < 0) {
		zip_err("Deflate scomp algorithm registration failed\n");
		goto err_unregister_alg_lzs;
	}

	ret = crypto_register_scomp(&zip_scomp_lzs);
	if (ret < 0) {
		zip_err("LZS scomp algorithm registration failed\n");
		goto err_unregister_scomp_deflate;
	}

	return ret;

err_unregister_scomp_deflate:
	crypto_unregister_scomp(&zip_scomp_deflate);
err_unregister_alg_lzs:
	crypto_unregister_alg(&zip_comp_lzs);
err_unregister_alg_deflate:
	crypto_unregister_alg(&zip_comp_deflate);

	return ret;
}

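/* Unregisters all of the compression algorithms registered above */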
static void zip_unregister_compression_device(void)
{
	crypto_unregister_alg(&zip_comp_deflate);
	crypto_unregister_alg(&zip_comp_lzs);
	crypto_unregister_scomp(&zip_scomp_deflate);
	crypto_unregister_scomp(&zip_scomp_lzs);
}

/*
 * debugfs functions
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

/* Displays ZIP device statistics */
static int zip_stats_show(struct seq_file *s, void *unused)
{
	u64 val = 0ull;
	u64 avg_chunk = 0ull, avg_cr = 0ull;
	u32 q = 0;

	int index  = 0;
	struct zip_device *zip;
	struct zip_stats  *st;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		u64 pending = 0;

		if (zip_dev[index]) {
			zip = zip_dev[index];
			st  = &zip->stats;

			/* Get all the pending requests */
			for (q = 0; q < ZIP_NUM_QUEUES; q++) {
				val = zip_reg_read((zip->reg_base +
						    ZIP_DBG_QUEX_STA(q)));
				pending += val >> 32 & 0xffffff;
			}

			val = atomic64_read(&st->comp_req_complete);
			avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;

			val = atomic64_read(&st->comp_out_bytes);
			avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
			seq_printf(s, "        ZIP Device %d Stats\n"
				      "-----------------------------------\n"
				      "Comp Req Submitted        : \t%lld\n"
				      "Comp Req Completed        : \t%lld\n"
				      "Compress In Bytes         : \t%lld\n"
				      "Compressed Out Bytes      : \t%lld\n"
				      "Average Chunk size        : \t%llu\n"
				      "Average Compression ratio : \t%llu\n"
				      "Decomp Req Submitted      : \t%lld\n"
				      "Decomp Req Completed      : \t%lld\n"
				      "Decompress In Bytes       : \t%lld\n"
				      "Decompressed Out Bytes    : \t%lld\n"
				      "Decompress Bad requests   : \t%lld\n"
				      "Pending Req               : \t%lld\n"
				      "---------------------------------\n",
				       index,
				       (u64)atomic64_read(&st->comp_req_submit),
				       (u64)atomic64_read(&st->comp_req_complete),
				       (u64)atomic64_read(&st->comp_in_bytes),
				       (u64)atomic64_read(&st->comp_out_bytes),
				       avg_chunk,
				       avg_cr,
				       (u64)atomic64_read(&st->decomp_req_submit),
				       (u64)atomic64_read(&st->decomp_req_complete),
				       (u64)atomic64_read(&st->decomp_in_bytes),
				       (u64)atomic64_read(&st->decomp_out_bytes),
				       (u64)atomic64_read(&st->decomp_bad_reqs),
				       pending);
		}
	}
	return 0;
}

/* Clears stats data */
static int zip_clear_show(struct seq_file *s, void *unused)
{
	int index = 0;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		if (zip_dev[index]) {
			memset(&zip_dev[index]->stats, 0,
			       sizeof(struct zip_stats));
			seq_printf(s, "Cleared stats for zip %d\n", index);
		}
	}

	return 0;
}

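/* Name/offset pairs for the CSRs dumped by the zip_regs debugfs entry */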
static struct zip_registers zipregs[64] = {
	{"ZIP_CMD_CTL        ",  0x0000ull},
	{"ZIP_THROTTLE       ",  0x0010ull},
	{"ZIP_CONSTANTS      ",  0x00A0ull},
	{"ZIP_QUE0_MAP       ",  0x1400ull},
	{"ZIP_QUE1_MAP       ",  0x1408ull},
	{"ZIP_QUE_ENA        ",  0x0500ull},
	{"ZIP_QUE_PRI        ",  0x0508ull},
	{"ZIP_QUE0_DONE      ",  0x2000ull},
	{"ZIP_QUE1_DONE      ",  0x2008ull},
	{"ZIP_QUE0_DOORBELL  ",  0x4000ull},
	{"ZIP_QUE1_DOORBELL  ",  0x4008ull},
	{"ZIP_QUE0_SBUF_ADDR ",  0x1000ull},
	{"ZIP_QUE1_SBUF_ADDR ",  0x1008ull},
	{"ZIP_QUE0_SBUF_CTL  ",  0x1200ull},
	{"ZIP_QUE1_SBUF_CTL  ",  0x1208ull},
	{ NULL, 0}
};

/* Prints registers' contents */
static int zip_regs_show(struct seq_file *s, void *unused)
{
	u64 val = 0;
	int i = 0, index = 0;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		if (zip_dev[index]) {
			seq_printf(s, "--------------------------------\n"
				      "     ZIP Device %d Registers\n"
				      "--------------------------------\n",
				      index);

			i = 0;

			while (zipregs[i].reg_name) {
				val = zip_reg_read((zip_dev[index]->reg_base +
						    zipregs[i].reg_offset));
				seq_printf(s, "%s: 0x%016llx\n",
					   zipregs[i].reg_name, val);
				i++;
			}
		}
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(zip_stats);
DEFINE_SHOW_ATTRIBUTE(zip_clear);
DEFINE_SHOW_ATTRIBUTE(zip_regs);

/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;

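/*
 * Creates the thunderx_zip debugfs directory and its zip_stats, zip_clear
 * and zip_regs entries. With debugfs mounted at the usual location, the
 * statistics can be read with:
 *
 *   cat /sys/kernel/debug/thunderx_zip/zip_stats
 */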
static void __init zip_debugfs_init(void)
{
	if (!debugfs_initialized())
		return;

	zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);

	/* Creating files for entries inside thunderx_zip directory */
	debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
			    &zip_stats_fops);

	debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
			    &zip_clear_fops);

	debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
			    &zip_regs_fops);
}

static void __exit zip_debugfs_exit(void)
{
	debugfs_remove_recursive(zip_debugfs_root);
}

#else
static void __init zip_debugfs_init(void) { }
static void __exit zip_debugfs_exit(void) { }
#endif
/* debugfs - end */

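/*
 * Module init: register the PCI driver, then the compression algorithms,
 * then create the debugfs entries.
 */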
static int __init zip_init_module(void)
{
	int ret;

	zip_msg("%s\n", DRV_NAME);

	ret = pci_register_driver(&zip_driver);
	if (ret < 0) {
		zip_err("ZIP: pci_register_driver() failed\n");
		return ret;
	}

	/* Register with the Kernel Crypto Interface */
	ret = zip_register_compression_device();
	if (ret < 0) {
		zip_err("ZIP: Kernel Crypto Registration failed\n");
		goto err_pci_unregister;
	}

	/* comp-decomp statistics are handled with debugfs interface */
	zip_debugfs_init();

	return ret;

err_pci_unregister:
	pci_unregister_driver(&zip_driver);
	return ret;
}

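/* Module exit: tears down debugfs, crypto registration and the PCI driver */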
static void __exit zip_cleanup_module(void)
{
	zip_debugfs_exit();

	/* Unregister from the kernel crypto interface */
	zip_unregister_compression_device();

	/* Unregister this driver for pci zip devices */
	pci_unregister_driver(&zip_driver);
}

module_init(zip_init_module);
module_exit(zip_cleanup_module);

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, zip_id_table);