/*
 * Driver for MMC and SSD cards for Cavium ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cavium Inc.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include "cavium.h"

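/*
 * All slots sit behind one shared eMMC controller; a semaphore serializes
 * access so that only one slot drives the bus at a time.
 */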
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	down(&host->mmc_serializer);
}

static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
	up(&host->mmc_serializer);
}

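/*
 * Acknowledge the requested interrupt sources in MIO_EMM_INT (assumed
 * write-1-to-clear on this hardware) before enabling them through the
 * W1S set register.
 */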
static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
}

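/*
 * Allocate between one and nine MSI-X vectors for the eMMC unit and wire
 * each granted vector to the shared cvm_mmc_interrupt() handler, named
 * after its source via cvm_mmc_irq_names[].
 */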
static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
					   struct pci_dev *pdev)
{
	int nvec, ret, i;

	nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* register interrupts */
	for (i = 0; i < nvec; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       cvm_mmc_interrupt,
				       0, cvm_mmc_irq_names[i], host);
		if (ret)
			return ret;
	}
	return 0;
}

static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base) {
		ret = -EINVAL;
		goto error;
	}

	/* On ThunderX these are identical */
	host->dma_base = host->base;

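	/*
	 * Offsets of the eMMC and DMA register groups within BAR0; the
	 * MIO_EMM_* accessor macros from cavium.h add them to each access.
	 */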
	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto error;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto error;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

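	/*
	 * Capabilities of this controller revision: scatter-gather DMA
	 * lists, wide (64-bit register) DMA addresses, and an interrupt
	 * handler that must take irq_handler_lock.
	 */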
	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

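	/* The DMA engine drives 48-bit addresses */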
	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader. Writing 1 to the bits clears them.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and set the node pointer to
		 * the slot. The easiest way to get this is using
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret) {
				of_node_put(child_node);
				goto error;
			}
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

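	/*
	 * Unwind in reverse order: remove any slots already probed, destroy
	 * the dummy platform devices (holding an extra reference so each
	 * device stays valid while of_platform_device_destroy() runs), then
	 * release the clock and PCI regions.
	 */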
error:
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i]) {
			get_device(&host->slot_pdev[i]->dev);
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
			put_device(&host->slot_pdev[i]->dev);
		}
	}
	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
	return ret;
}

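/*
 * Tear down the slots first, then make sure the DMA engine is disabled
 * before the clock and PCI regions go away.
 */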
static void thunder_mmc_remove(struct pci_dev *pdev)
{
	struct cvm_mmc_host *host = pci_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
}

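/* The ThunderX eMMC unit enumerates as PCI device 0xa010 under the Cavium vendor ID */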
static const struct pci_device_id thunder_mmc_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
	{ 0, }  /* end of table */
};

static struct pci_driver thunder_mmc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunder_mmc_id_table,
	.probe = thunder_mmc_probe,
	.remove = thunder_mmc_remove,
};

module_pci_driver(thunder_mmc_driver);

MODULE_AUTHOR("Cavium Inc.");
MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);