// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic PXA PATA driver
 *
 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <scsi/scsi_host.h>

#include <linux/platform_data/ata-pxa.h>

#define DRV_NAME	"pata_pxa"
#define DRV_VERSION	"0.1"

struct pata_pxa_data {
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	struct completion	dma_done;
};

/*
 * DMA interrupt handler.
 */
static void pxa_ata_dma_irq(void *d)
{
	struct pata_pxa_data *pd = d;
	enum dma_status status;

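	/*
	 * Only signal the waiter in pxa_bmdma_stop() once the cookie
	 * reports that the transfer actually finished or failed.
	 */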
	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status == DMA_ERROR || status == DMA_COMPLETE)
		complete(&pd->dma_done);
}

/*
 * Prepare taskfile for submission.
 */
static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	struct dma_async_tx_descriptor *tx;
	enum dma_transfer_direction dir;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

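	/*
	 * Translate the request's DMA direction into the slave direction
	 * expected by dmaengine_prep_slave_sg().
	 */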
	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
		return AC_ERR_OK;
	}
	tx->callback = pxa_ata_dma_irq;
	tx->callback_param = pd;
	pd->dma_cookie = dmaengine_submit(tx);

	return AC_ERR_OK;
}

/*
 * Configure the DMA controller, load the DMA descriptors, but don't start the
 * DMA controller yet. Only issue the ATA command.
 */
static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}

/*
 * Execute the DMA transfer.
 */
static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	init_completion(&pd->dma_done);
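	/* Kick the engine; this starts the descriptor queued in pxa_qc_prep(). */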
	dma_async_issue_pending(pd->dma_chan);
}

/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status != DMA_ERROR && status != DMA_COMPLETE &&
	    !wait_for_completion_timeout(&pd->dma_done, HZ))
		ata_dev_err(qc->dev, "Timeout waiting for DMA completion!\n");

	dmaengine_terminate_all(pd->dma_chan);
}

/*
 * Read DMA status. The bmdma_stop() will take care of properly finishing the
 * DMA transfer, so we always have a DMA-complete interrupt here.
 */
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
	struct pata_pxa_data *pd = ap->private_data;
	unsigned char ret = ATA_DMA_INTR;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
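	/*
	 * bmdma_stop() has already waited for (and terminated) the transfer,
	 * so anything other than DMA_COMPLETE here indicates a failed one.
	 */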
	if (status != DMA_COMPLETE)
		ret |= ATA_DMA_ERR;

	return ret;
}

/*
 * No IRQ register present so we do nothing.
 */
static void pxa_irq_clear(struct ata_port *ap)
{
}

/*
 * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
 * unclear why ATAPI has DMA issues.
 */
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return -EOPNOTSUPP;
}

static struct scsi_host_template pxa_ata_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations pxa_ata_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= ata_cable_40wire,

	.bmdma_setup		= pxa_bmdma_setup,
	.bmdma_start		= pxa_bmdma_start,
	.bmdma_stop		= pxa_bmdma_stop,
	.bmdma_status		= pxa_bmdma_status,

	.check_atapi_dma	= pxa_check_atapi_dma,

	.sff_irq_clear		= pxa_irq_clear,

	.qc_prep		= pxa_qc_prep,
};

static int pxa_ata_probe(struct platform_device *pdev)
{
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_pxa_data *data;
	struct resource *cmd_res;
	struct resource *ctl_res;
	struct resource *dma_res;
	struct resource *irq_res;
	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct dma_slave_config	config;
	int ret = 0;

	/*
	 * Resource validation, four resources are needed:
	 *  - CMD port base address
	 *  - CTL port base address
	 *  - DMA port base address
	 *  - IRQ pin
	 */
	if (pdev->num_resources != 4) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * CMD port base address
	 */
	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(cmd_res == NULL))
		return -EINVAL;

	/*
	 * CTL port base address
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (unlikely(ctl_res == NULL))
		return -EINVAL;

	/*
	 * DMA port base address
	 */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (unlikely(dma_res == NULL))
		return -EINVAL;

	/*
	 * IRQ pin
	 */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(irq_res == NULL))
		return -EINVAL;

	/*
	 * Allocate the host
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	ap		= host->ports[0];
	ap->ops		= &pxa_ata_port_ops;
	ap->pio_mask	= ATA_PIO4;
	ap->mwdma_mask	= ATA_MWDMA2;

	ap->ioaddr.cmd_addr	= devm_ioremap(&pdev->dev, cmd_res->start,
						resource_size(cmd_res));
	ap->ioaddr.ctl_addr	= devm_ioremap(&pdev->dev, ctl_res->start,
						resource_size(ctl_res));
	ap->ioaddr.bmdma_addr	= devm_ioremap(&pdev->dev, dma_res->start,
						resource_size(dma_res));

	/*
	 * Adjust register offsets
	 */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
	ap->ioaddr.data_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DATA << pdata->reg_shift);
	ap->ioaddr.error_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_ERR << pdata->reg_shift);
	ap->ioaddr.feature_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_FEATURE << pdata->reg_shift);
	ap->ioaddr.nsect_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_NSECT << pdata->reg_shift);
	ap->ioaddr.lbal_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAL << pdata->reg_shift);
	ap->ioaddr.lbam_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAM << pdata->reg_shift);
	ap->ioaddr.lbah_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAH << pdata->reg_shift);
	ap->ioaddr.device_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DEVICE << pdata->reg_shift);
	ap->ioaddr.status_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_STATUS << pdata->reg_shift);
	ap->ioaddr.command_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_CMD << pdata->reg_shift);

	/*
	 * Allocate and load driver's internal data structure
	 */
	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
								GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ap->private_data = data;

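	/*
	 * Slave configuration: 16-bit transfers, matching the width of the
	 * ATA data register, to/from the fixed DMA port address, with a
	 * maximum burst of 32.
	 */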
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = dma_res->start;
	config.dst_addr = dma_res->start;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	/*
	 * Request the DMA channel
	 */
	data->dma_chan =
		dma_request_slave_channel(&pdev->dev, "data");
	if (!data->dma_chan)
		return -EBUSY;
	ret = dmaengine_slave_config(data->dma_chan, &config);
	if (ret < 0) {
		dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
		dma_release_channel(data->dma_chan);
		return ret;
	}

	/*
	 * Activate the ATA host
	 */
	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
				pdata->irq_flags, &pxa_ata_sht);
	if (ret)
		dma_release_channel(data->dma_chan);

	return ret;
}

static int pxa_ata_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct pata_pxa_data *data = host->ports[0]->private_data;

	dma_release_channel(data->dma_chan);

	ata_host_detach(host);

	return 0;
}

static struct platform_driver pxa_ata_driver = {
	.probe		= pxa_ata_probe,
	.remove		= pxa_ata_remove,
	.driver		= {
		.name		= DRV_NAME,
	},
};

module_platform_driver(pxa_ata_driver);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);