xref: /OK3568_Linux_fs/kernel/drivers/mmc/host/alcor.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Driver for Alcor Micro AU6601 and AU6621 controllers
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
/* Note: this driver was created without any documentation. It is based
 * on sniffing, testing and, in some cases, mimicking the original driver.
 * If you have documentation or more experience in SD/MMC or reverse
 * engineering than me, please review this driver and question everything
 * I did. 2018 Oleksij Rempel <linux@rempel-privat.de>
 */
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include <linux/delay.h>
16*4882a593Smuzhiyun #include <linux/pci.h>
17*4882a593Smuzhiyun #include <linux/module.h>
18*4882a593Smuzhiyun #include <linux/io.h>
19*4882a593Smuzhiyun #include <linux/pm.h>
20*4882a593Smuzhiyun #include <linux/irq.h>
21*4882a593Smuzhiyun #include <linux/interrupt.h>
22*4882a593Smuzhiyun #include <linux/platform_device.h>
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #include <linux/mmc/host.h>
25*4882a593Smuzhiyun #include <linux/mmc/mmc.h>
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #include <linux/alcor_pci.h>
28*4882a593Smuzhiyun 
/*
 * Values stored in mmc_data->host_cookie to track the DMA-mapping state
 * of a request's buffer. Only COOKIE_MAPPED selects the DMA transfer
 * path; anything else falls back to PIO.
 */
enum alcor_cookie {
	COOKIE_UNMAPPED,
	COOKIE_PRE_MAPPED,	/* presumably mapped in ->pre_req() - confirm */
	COOKIE_MAPPED,		/* buffer is DMA-mapped; DMA mode is used */
};
34*4882a593Smuzhiyun 
/* One selectable clock source and the divider range it supports. */
struct alcor_pll_conf {
	unsigned int clk_src_freq;	/* source frequency in Hz */
	unsigned int clk_src_reg;	/* register value selecting this source */
	unsigned int min_div;		/* smallest usable divider */
	unsigned int max_div;		/* largest usable divider */
};
41*4882a593Smuzhiyun 
/* Per-slot driver state, kept as mmc_host private data. */
struct alcor_sdmmc_host {
	struct  device *dev;
	struct alcor_pci_priv *alcor_pci;	/* PCI-level register accessors */

	/* Currently active request/command/data; NULL when idle. */
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	unsigned int dma_on:1;	/* current data phase runs in DMA mode */

	struct mutex cmd_mutex;	/* serializes the threaded IRQ handler
				 * against other users of the state above */

	struct delayed_work timeout_work;	/* software command timeout */

	struct sg_mapping_iter sg_miter;	/* SG state for PIO */
	struct scatterlist *sg;		/* next SG entry to program for DMA */
	unsigned int blocks;		/* remaining PIO blocks */
	int sg_count;			/* remaining DMA SG entries */

	u32			irq_status_sd;	/* status latched for IRQ thread */
	unsigned char		cur_power_mode;
};
63*4882a593Smuzhiyun 
static const struct alcor_pll_conf alcor_pll_cfg[] = {
	/* Hz,		CLK src,		min div, max div */
	{ 31250000,	AU6601_CLK_31_25_MHZ,	1,	511},
	{ 48000000,	AU6601_CLK_48_MHZ,	1,	511},
	{125000000,	AU6601_CLK_125_MHZ,	1,	511},
	{384000000,	AU6601_CLK_384_MHZ,	1,	511},
};
71*4882a593Smuzhiyun 
/*
 * Read-modify-write an 8-bit controller register: the bits in @clear
 * are cleared first, then the bits in @set are set.
 */
static inline void alcor_rmw8(struct alcor_sdmmc_host *host, unsigned int addr,
			       u8 clear, u8 set)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	u32 val;

	val = alcor_read8(priv, addr);
	val = (val & ~clear) | set;
	alcor_write8(priv, val, addr);
}
83*4882a593Smuzhiyun 
/* As soon as irqs are masked, some status updates may be missed.
 * Use this with care.
 */
static inline void alcor_mask_sd_irqs(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	/* Writing 0 disables every interrupt source at once. */
	alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
}
93*4882a593Smuzhiyun 
/* Re-enable the interrupt sources this driver handles: command/data
 * events, card insert/remove and over-current errors.
 */
static inline void alcor_unmask_sd_irqs(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK |
		  AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE |
		  AU6601_INT_OVER_CURRENT_ERR,
		  AU6601_REG_INT_ENABLE);
}
103*4882a593Smuzhiyun 
/*
 * Assert the reset bits in @val (together with the buffer-control
 * reset) and busy-wait until the controller clears them, polling every
 * 50 us for up to 100 iterations. Logs an error on timeout.
 */
static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	int attempts;

	alcor_write8(priv, val | AU6601_BUF_CTRL_RESET,
		      AU6601_REG_SW_RESET);

	for (attempts = 100; attempts > 0; attempts--) {
		if (!(alcor_read8(priv, AU6601_REG_SW_RESET) & val))
			return;
		udelay(50);
	}

	dev_err(host->dev, "%s: timeout\n", __func__);
}
118*4882a593Smuzhiyun 
/*
 * Perform DMA I/O of a single page: program the next scatter-gather
 * entry into the single-page DMA address register and advance the SG
 * cursor. No-op when no entries remain; SG inconsistencies are logged
 * and skipped.
 */
static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	u32 addr;

	if (!host->sg_count)
		return;

	if (!host->sg) {
		dev_err(host->dev, "have blocks, but no SG\n");
		return;
	}

	if (!sg_dma_len(host->sg)) {
		dev_err(host->dev, "DMA SG len == 0\n");
		return;
	}

	/* NOTE(review): truncates to 32 bits - assumes a 32-bit DMA mask
	 * is configured elsewhere; confirm against the probe path.
	 */
	addr = (u32)sg_dma_address(host->sg);

	alcor_write32(priv, addr, AU6601_REG_SDMA_ADDR);
	host->sg = sg_next(host->sg);
	host->sg_count--;
}
147*4882a593Smuzhiyun 
/*
 * Kick off (or continue) the data phase: program the block-size
 * register according to DMA or PIO mode and start the transfer via the
 * data transfer control register. Direction comes from host->data.
 */
static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct mmc_data *data = host->data;
	u8 ctrl = 0;

	if (data->flags & MMC_DATA_WRITE)
		ctrl |= AU6601_DATA_WRITE;

	if (data->host_cookie == COOKIE_MAPPED) {
		/*
		 * For DMA transfers, this function is called just once,
		 * at the start of the operation. The hardware can only
		 * perform DMA I/O on a single page at a time, so here
		 * we kick off the transfer with the first page, and expect
		 * subsequent pages to be transferred upon IRQ events
		 * indicating that the single-page DMA was completed.
		 */
		alcor_data_set_dma(host);
		ctrl |= AU6601_DATA_DMA_MODE;
		host->dma_on = 1;
		/* NOTE(review): total length is written as sg_count * 4 KiB,
		 * which assumes every SG entry covers exactly one page -
		 * confirm against the mapping path.
		 */
		alcor_write32(priv, data->sg_count * 0x1000,
			       AU6601_REG_BLOCK_SIZE);
	} else {
		/*
		 * For PIO transfers, we break down each operation
		 * into several sector-sized transfers. When one sector has
		 * complete, the IRQ handler will call this function again
		 * to kick off the transfer of the next sector.
		 */
		alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
	}

	alcor_write8(priv, ctrl | AU6601_DATA_START_XFER,
		      AU6601_DATA_XFER_CTRL);
}
184*4882a593Smuzhiyun 
/*
 * PIO-copy one block between the controller FIFO and the current SG
 * segment using 32-bit accesses. Called from the IRQ path once per
 * READ_BUF_RDY/WRITE_BUF_RDY event; direction mismatches are logged
 * but the transfer proceeds anyway.
 */
static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	size_t blksize, len;
	u8 *buf;

	if (!host->blocks)
		return;

	if (host->dma_on) {
		dev_err(host->dev, "configured DMA but got PIO request.\n");
		return;
	}

	if (!!(host->data->flags & MMC_DATA_READ) != read) {
		dev_err(host->dev, "got unexpected direction %i != %i\n",
			!!(host->data->flags & MMC_DATA_READ), read);
	}

	if (!sg_miter_next(&host->sg_miter))
		return;

	/* Never cross an SG segment boundary in one burst. */
	blksize = host->data->blksz;
	len = min(host->sg_miter.length, blksize);

	dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
		read ? "read" : "write", blksize);

	host->sg_miter.consumed = len;
	host->blocks--;

	buf = host->sg_miter.addr;

	/* len >> 2: the FIFO is accessed in 32-bit words; assumes len is
	 * a multiple of 4 - TODO confirm blksz constraints.
	 */
	if (read)
		ioread32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
	else
		iowrite32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);

	sg_miter_stop(&host->sg_miter);
}
225*4882a593Smuzhiyun 
alcor_prepare_sg_miter(struct alcor_sdmmc_host * host)226*4882a593Smuzhiyun static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
227*4882a593Smuzhiyun {
228*4882a593Smuzhiyun 	unsigned int flags = SG_MITER_ATOMIC;
229*4882a593Smuzhiyun 	struct mmc_data *data = host->data;
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun 	if (data->flags & MMC_DATA_READ)
232*4882a593Smuzhiyun 		flags |= SG_MITER_TO_SG;
233*4882a593Smuzhiyun 	else
234*4882a593Smuzhiyun 		flags |= SG_MITER_FROM_SG;
235*4882a593Smuzhiyun 	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun 
/*
 * Latch @cmd's data descriptor (if any) into the host state and clear
 * the data transfer control register. PIO requests (buffer not
 * DMA-mapped) additionally get their SG iterator initialized.
 */
static void alcor_prepare_data(struct alcor_sdmmc_host *host,
			       struct mmc_command *cmd)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct mmc_data *data = cmd->data;

	if (!data)
		return;


	host->data = data;
	host->data->bytes_xfered = 0;
	host->blocks = data->blocks;
	host->sg = data->sg;
	host->sg_count = data->sg_count;
	dev_dbg(host->dev, "prepare DATA: sg %i, blocks: %i\n",
			host->sg_count, host->blocks);

	if (data->host_cookie != COOKIE_MAPPED)
		alcor_prepare_sg_miter(host);

	alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
}
261*4882a593Smuzhiyun 
/*
 * Issue @cmd to the controller: prepare any data phase, program the
 * opcode, argument and response type, optionally arm the software
 * timeout work, and start the command transfer.
 */
static void alcor_send_cmd(struct alcor_sdmmc_host *host,
			   struct mmc_command *cmd, bool set_timeout)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	unsigned long timeout = 0;
	u8 ctrl = 0;

	host->cmd = cmd;
	alcor_prepare_data(host, cmd);

	dev_dbg(host->dev, "send CMD. opcode: 0x%02x, arg; 0x%08x\n",
		cmd->opcode, cmd->arg);
	/* 0x40: presumably the start/transmission bits of an SD command
	 * token - NOTE(review): no documentation available to confirm.
	 */
	alcor_write8(priv, cmd->opcode | 0x40, AU6601_REG_CMD_OPCODE);
	alcor_write32be(priv, cmd->arg, AU6601_REG_CMD_ARG);

	/* Map the MMC response type onto the controller's response mode. */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		ctrl = AU6601_CMD_NO_RESP;
		break;
	case MMC_RSP_R1:
		ctrl = AU6601_CMD_6_BYTE_CRC;
		break;
	case MMC_RSP_R1B:
		ctrl = AU6601_CMD_6_BYTE_CRC | AU6601_CMD_STOP_WAIT_RDY;
		break;
	case MMC_RSP_R2:
		ctrl = AU6601_CMD_17_BYTE_CRC;
		break;
	case MMC_RSP_R3:
		ctrl = AU6601_CMD_6_BYTE_WO_CRC;
		break;
	default:
		dev_err(host->dev, "%s: cmd->flag (0x%02x) is not valid\n",
			mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd));
		break;
	}

	if (set_timeout) {
		/* Use the command's own busy timeout only for data-less
		 * commands; otherwise fall back to 10 seconds.
		 */
		if (!cmd->data && cmd->busy_timeout)
			timeout = cmd->busy_timeout;
		else
			timeout = 10000;

		schedule_delayed_work(&host->timeout_work,
				      msecs_to_jiffies(timeout));
	}

	dev_dbg(host->dev, "xfer ctrl: 0x%02x; timeout: %lu\n", ctrl, timeout);
	alcor_write8(priv, ctrl | AU6601_CMD_START_XFER,
				 AU6601_CMD_XFER_CTRL);
}
313*4882a593Smuzhiyun 
/*
 * Finish the active request: clear all per-request state, optionally
 * cancel the timeout watchdog, and report completion to the MMC core.
 * Safe to call with no request pending.
 */
static void alcor_request_complete(struct alcor_sdmmc_host *host,
				   bool cancel_timeout)
{
	struct mmc_request *mrq = host->mrq;

	/*
	 * If this work gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!mrq)
		return;

	if (cancel_timeout)
		cancel_delayed_work(&host->timeout_work);

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	host->dma_on = 0;

	mmc_request_done(mmc_from_priv(host), mrq);
}
338*4882a593Smuzhiyun 
/*
 * Complete the data phase: account transferred bytes, send CMD12 when
 * needed (open-ended or failed multi-block transfer), otherwise finish
 * the whole request.
 */
static void alcor_finish_data(struct alcor_sdmmc_host *host)
{
	struct mmc_data *data;

	data = host->data;
	host->data = NULL;
	host->dma_on = 0;

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error)
			alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

		alcor_unmask_sd_irqs(host);
		alcor_send_cmd(host, data->stop, false);
		return;
	}

	alcor_request_complete(host, 1);
}
382*4882a593Smuzhiyun 
/*
 * Error interrupt handling: mark the active command and/or data with
 * a timeout or CRC-style error, reset the controller state machines
 * and abort the request.
 */
static void alcor_err_irq(struct alcor_sdmmc_host *host, u32 intmask)
{
	dev_dbg(host->dev, "ERR IRQ %x\n", intmask);

	if (host->cmd)
		host->cmd->error = (intmask & AU6601_INT_CMD_TIMEOUT_ERR) ?
			-ETIMEDOUT : -EILSEQ;

	if (host->data) {
		host->data->error = (intmask & AU6601_INT_DATA_TIMEOUT_ERR) ?
			-ETIMEDOUT : -EILSEQ;
		host->data->bytes_xfered = 0;
	}

	alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
	alcor_request_complete(host, 1);
}
406*4882a593Smuzhiyun 
/*
 * Hard-IRQ command-end handling. Reads the response registers into the
 * active command and, when a data phase follows, starts it. Returns
 * nonzero when the event was fully handled here, zero when the
 * threaded handler must take over.
 */
static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	intmask &= AU6601_INT_CMD_END;

	if (!intmask)
		return true;

	/* got CMD_END but no CMD is in progress, wake thread an process the
	 * error
	 */
	if (!host->cmd)
		return false;

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		struct mmc_command *cmd = host->cmd;

		/* Response registers are read big-endian. */
		cmd->resp[0] = alcor_read32be(priv, AU6601_REG_CMD_RSP0);
		dev_dbg(host->dev, "RSP0: 0x%04x\n", cmd->resp[0]);
		if (host->cmd->flags & MMC_RSP_136) {
			cmd->resp[1] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP1);
			cmd->resp[2] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP2);
			cmd->resp[3] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP3);
			dev_dbg(host->dev, "RSP1,2,3: 0x%04x 0x%04x 0x%04x\n",
				cmd->resp[1], cmd->resp[2], cmd->resp[3]);
		}

	}

	host->cmd->error = 0;

	/* Processed actual command. */
	if (!host->data)
		return false;

	alcor_trigger_data_transfer(host);
	host->cmd = NULL;
	return true;
}
450*4882a593Smuzhiyun 
/*
 * Threaded command-end handling: logs spurious command interrupts,
 * then either starts the data phase or completes the request, and
 * drops the active-command reference.
 */
static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
{
	if (!(intmask & AU6601_INT_CMD_END))
		return;

	if (!host->cmd) {
		dev_dbg(host->dev, "Got command interrupt 0x%08x even though no command operation was in progress.\n",
			intmask & AU6601_INT_CMD_END);
	}

	/* Processed actual command. */
	if (host->data)
		alcor_trigger_data_transfer(host);
	else
		alcor_request_complete(host, 1);
	host->cmd = NULL;
}
470*4882a593Smuzhiyun 
/*
 * Hard-IRQ data event handling: feeds the PIO FIFO or programs the
 * next DMA page. Returns 1 when the event was consumed here, 0 when
 * the threaded handler must finish (or error out) the data phase.
 */
static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
	u32 tmp;

	intmask &= AU6601_INT_DATA_MASK;

	/* nothing here to do */
	if (!intmask)
		return 1;

	/* we was too fast and got DATA_END after it was processed?
	 * lets ignore it for now.
	 */
	if (!host->data && intmask == AU6601_INT_DATA_END)
		return 1;

	/* looks like an error, so lets handle it. */
	if (!host->data)
		return 0;

	tmp = intmask & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
			 | AU6601_INT_DMA_END);
	switch (tmp) {
	case 0:
		break;
	case AU6601_INT_READ_BUF_RDY:
		alcor_trf_block_pio(host, true);
		return 1;
	case AU6601_INT_WRITE_BUF_RDY:
		alcor_trf_block_pio(host, false);
		return 1;
	case AU6601_INT_DMA_END:
		/* Program the next single-page DMA chunk, if any remain. */
		if (!host->sg_count)
			break;

		alcor_data_set_dma(host);
		break;
	default:
		dev_err(host->dev, "Got READ_BUF_RDY and WRITE_BUF_RDY at same time\n");
		break;
	}

	/* DATA_END with PIO blocks outstanding: start the next block;
	 * otherwise hand over to the thread to finish the data phase.
	 */
	if (intmask & AU6601_INT_DATA_END) {
		if (!host->dma_on && host->blocks) {
			alcor_trigger_data_transfer(host);
			return 1;
		} else {
			return 0;
		}
	}

	return 1;
}
524*4882a593Smuzhiyun 
/*
 * Threaded data event handling: resets the data path on spurious
 * interrupts, retries the fast-path handler, and completes the data
 * phase once the transfer is finished.
 */
static void alcor_data_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
{
	intmask &= AU6601_INT_DATA_MASK;

	if (!intmask)
		return;

	if (!host->data) {
		dev_dbg(host->dev, "Got data interrupt 0x%08x even though no data operation was in progress.\n",
			intmask);
		alcor_reset(host, AU6601_RESET_DATA);
		return;
	}

	if (alcor_data_irq_done(host, intmask))
		return;

	/* Transfer is over: DATA_END seen, no PIO blocks left, or the
	 * DMA SG list is exhausted.
	 */
	if ((intmask & AU6601_INT_DATA_END) || !host->blocks ||
	    (host->dma_on && !host->sg_count))
		alcor_finish_data(host);
}
546*4882a593Smuzhiyun 
/*
 * Card-detect interrupt: fail any in-flight request with -ENOMEDIUM
 * and let the MMC core rescan the slot.
 */
static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
{
	struct mmc_request *mrq = host->mrq;

	dev_dbg(host->dev, "card %s\n",
		intmask & AU6601_INT_CARD_REMOVE ? "removed" : "inserted");

	if (mrq) {
		dev_dbg(host->dev, "cancel all pending tasks.\n");

		if (host->data)
			host->data->error = -ENOMEDIUM;

		if (host->cmd)
			host->cmd->error = -ENOMEDIUM;
		else
			mrq->cmd->error = -ENOMEDIUM;

		alcor_request_complete(host, 1);
	}

	mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1));
}
568*4882a593Smuzhiyun 
/*
 * Threaded half of the interrupt handler. Processes the status word
 * latched in host->irq_status_sd (SD interrupts were masked by
 * alcor_irq() before waking this thread) under cmd_mutex, then
 * re-enables the SD interrupt sources.
 */
static irqreturn_t alcor_irq_thread(int irq, void *d)
{
	struct alcor_sdmmc_host *host = d;
	irqreturn_t ret = IRQ_HANDLED;
	u32 intmask, tmp;

	mutex_lock(&host->cmd_mutex);

	intmask = host->irq_status_sd;

	/* some thing bad */
	if (unlikely(!intmask || AU6601_INT_ALL_MASK == intmask)) {
		dev_dbg(host->dev, "unexpected IRQ: 0x%04x\n", intmask);
		ret = IRQ_NONE;
		goto exit;
	}

	tmp = intmask & (AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
	if (tmp) {
		/* Errors abort the request; otherwise handle command and
		 * data events in order.
		 */
		if (tmp & AU6601_INT_ERROR_MASK)
			alcor_err_irq(host, tmp);
		else {
			alcor_cmd_irq_thread(host, tmp);
			alcor_data_irq_thread(host, tmp);
		}
		intmask &= ~(AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
	}

	if (intmask & (AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE)) {
		alcor_cd_irq(host, intmask);
		intmask &= ~(AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE);
	}

	if (intmask & AU6601_INT_OVER_CURRENT_ERR) {
		dev_warn(host->dev,
			 "warning: over current detected!\n");
		intmask &= ~AU6601_INT_OVER_CURRENT_ERR;
	}

	if (intmask)
		dev_dbg(host->dev, "got not handled IRQ: 0x%04x\n", intmask);

exit:
	mutex_unlock(&host->cmd_mutex);
	alcor_unmask_sd_irqs(host);
	return ret;
}
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun 
/*
 * Hard interrupt handler: acknowledge the status register, try the
 * fast path for plain command/data events, otherwise latch the status
 * for the IRQ thread and mask SD interrupts until it has run.
 */
static irqreturn_t alcor_irq(int irq, void *d)
{
	struct alcor_sdmmc_host *host = d;
	struct alcor_pci_priv *priv = host->alcor_pci;
	u32 status, tmp;
	irqreturn_t ret;
	int cmd_done, data_done;

	status = alcor_read32(priv, AU6601_REG_INT_STATUS);
	if (!status)
		return IRQ_NONE;

	/* Write-back acknowledges the reported interrupt bits. */
	alcor_write32(priv, status, AU6601_REG_INT_STATUS);

	tmp = status & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
			| AU6601_INT_DATA_END | AU6601_INT_DMA_END
			| AU6601_INT_CMD_END);
	if (tmp == status) {
		cmd_done = alcor_cmd_irq_done(host, tmp);
		data_done = alcor_data_irq_done(host, tmp);
		/* use fast path for simple tasks */
		if (cmd_done && data_done) {
			ret = IRQ_HANDLED;
			goto alcor_irq_done;
		}
	}

	host->irq_status_sd = status;
	ret = IRQ_WAKE_THREAD;
	alcor_mask_sd_irqs(host);
alcor_irq_done:
	return ret;
}
651*4882a593Smuzhiyun 
/*
 * Pick the source/divider pair from alcor_pll_cfg that comes closest
 * to the requested rate and program the clock-select register.
 * @clock == 0 gates the card clock entirely.
 */
static void alcor_set_clock(struct alcor_sdmmc_host *host, unsigned int clock)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	int i, diff = 0x7fffffff, tmp_clock = 0;
	u16 clk_src = 0;
	u8 clk_div = 0;

	if (clock == 0) {
		alcor_write16(priv, 0, AU6601_CLK_SELECT);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(alcor_pll_cfg); i++) {
		unsigned int tmp_div, tmp_diff;
		const struct alcor_pll_conf *cfg = &alcor_pll_cfg[i];

		tmp_div = DIV_ROUND_UP(cfg->clk_src_freq, clock);
		if (cfg->min_div > tmp_div || tmp_div > cfg->max_div)
			continue;

		tmp_clock = DIV_ROUND_UP(cfg->clk_src_freq, tmp_div);
		tmp_diff = abs(clock - tmp_clock);

		/* Keep the candidate closest to the requested rate. */
		if (tmp_diff < diff) {
			diff = tmp_diff;
			clk_src = cfg->clk_src_reg;
			clk_div = tmp_div;
		}
	}

	/* NOTE(review): if no table entry matched, clk_div is still 0 and
	 * (clk_div - 1) wraps - confirm callers only request rates covered
	 * by alcor_pll_cfg.
	 */
	clk_src |= ((clk_div - 1) << 8);
	clk_src |= AU6601_CLK_ENABLE;

	dev_dbg(host->dev, "set freq %d cal freq %d, use div %d, mod %x\n",
			clock, tmp_clock, clk_div, clk_src);

	alcor_write16(priv, clk_src, AU6601_CLK_SELECT);

}
691*4882a593Smuzhiyun 
alcor_set_timing(struct mmc_host * mmc,struct mmc_ios * ios)692*4882a593Smuzhiyun static void alcor_set_timing(struct mmc_host *mmc, struct mmc_ios *ios)
693*4882a593Smuzhiyun {
694*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	if (ios->timing == MMC_TIMING_LEGACY) {
697*4882a593Smuzhiyun 		alcor_rmw8(host, AU6601_CLK_DELAY,
698*4882a593Smuzhiyun 			    AU6601_CLK_POSITIVE_EDGE_ALL, 0);
699*4882a593Smuzhiyun 	} else {
700*4882a593Smuzhiyun 		alcor_rmw8(host, AU6601_CLK_DELAY,
701*4882a593Smuzhiyun 			    0, AU6601_CLK_POSITIVE_EDGE_ALL);
702*4882a593Smuzhiyun 	}
703*4882a593Smuzhiyun }
704*4882a593Smuzhiyun 
alcor_set_bus_width(struct mmc_host * mmc,struct mmc_ios * ios)705*4882a593Smuzhiyun static void alcor_set_bus_width(struct mmc_host *mmc, struct mmc_ios *ios)
706*4882a593Smuzhiyun {
707*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
708*4882a593Smuzhiyun 	struct alcor_pci_priv *priv = host->alcor_pci;
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 	if (ios->bus_width == MMC_BUS_WIDTH_1) {
711*4882a593Smuzhiyun 		alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
712*4882a593Smuzhiyun 	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
713*4882a593Smuzhiyun 		alcor_write8(priv, AU6601_BUS_WIDTH_4BIT,
714*4882a593Smuzhiyun 			      AU6601_REG_BUS_CTRL);
715*4882a593Smuzhiyun 	} else
716*4882a593Smuzhiyun 		dev_err(host->dev, "Unknown BUS mode\n");
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun }
719*4882a593Smuzhiyun 
alcor_card_busy(struct mmc_host * mmc)720*4882a593Smuzhiyun static int alcor_card_busy(struct mmc_host *mmc)
721*4882a593Smuzhiyun {
722*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
723*4882a593Smuzhiyun 	struct alcor_pci_priv *priv = host->alcor_pci;
724*4882a593Smuzhiyun 	u8 status;
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 	/* Check whether dat[0:3] low */
727*4882a593Smuzhiyun 	status = alcor_read8(priv, AU6601_DATA_PIN_STATE);
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun 	return !(status & AU6601_BUS_STAT_DAT_MASK);
730*4882a593Smuzhiyun }
731*4882a593Smuzhiyun 
alcor_get_cd(struct mmc_host * mmc)732*4882a593Smuzhiyun static int alcor_get_cd(struct mmc_host *mmc)
733*4882a593Smuzhiyun {
734*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
735*4882a593Smuzhiyun 	struct alcor_pci_priv *priv = host->alcor_pci;
736*4882a593Smuzhiyun 	u8 detect;
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 	detect = alcor_read8(priv, AU6601_DETECT_STATUS)
739*4882a593Smuzhiyun 		& AU6601_DETECT_STATUS_M;
740*4882a593Smuzhiyun 	/* check if card is present then send command and data */
741*4882a593Smuzhiyun 	return (detect == AU6601_SD_DETECTED);
742*4882a593Smuzhiyun }
743*4882a593Smuzhiyun 
alcor_get_ro(struct mmc_host * mmc)744*4882a593Smuzhiyun static int alcor_get_ro(struct mmc_host *mmc)
745*4882a593Smuzhiyun {
746*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
747*4882a593Smuzhiyun 	struct alcor_pci_priv *priv = host->alcor_pci;
748*4882a593Smuzhiyun 	u8 status;
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun 	/* get write protect pin status */
751*4882a593Smuzhiyun 	status = alcor_read8(priv, AU6601_INTERFACE_MODE_CTRL);
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun 	return !!(status & AU6601_SD_CARD_WP);
754*4882a593Smuzhiyun }
755*4882a593Smuzhiyun 
alcor_request(struct mmc_host * mmc,struct mmc_request * mrq)756*4882a593Smuzhiyun static void alcor_request(struct mmc_host *mmc, struct mmc_request *mrq)
757*4882a593Smuzhiyun {
758*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 	mutex_lock(&host->cmd_mutex);
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	host->mrq = mrq;
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun 	/* check if card is present then send command and data */
765*4882a593Smuzhiyun 	if (alcor_get_cd(mmc))
766*4882a593Smuzhiyun 		alcor_send_cmd(host, mrq->cmd, true);
767*4882a593Smuzhiyun 	else {
768*4882a593Smuzhiyun 		mrq->cmd->error = -ENOMEDIUM;
769*4882a593Smuzhiyun 		alcor_request_complete(host, 1);
770*4882a593Smuzhiyun 	}
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun 	mutex_unlock(&host->cmd_mutex);
773*4882a593Smuzhiyun }
774*4882a593Smuzhiyun 
alcor_pre_req(struct mmc_host * mmc,struct mmc_request * mrq)775*4882a593Smuzhiyun static void alcor_pre_req(struct mmc_host *mmc,
776*4882a593Smuzhiyun 			   struct mmc_request *mrq)
777*4882a593Smuzhiyun {
778*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
779*4882a593Smuzhiyun 	struct mmc_data *data = mrq->data;
780*4882a593Smuzhiyun 	struct mmc_command *cmd = mrq->cmd;
781*4882a593Smuzhiyun 	struct scatterlist *sg;
782*4882a593Smuzhiyun 	unsigned int i, sg_len;
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun 	if (!data || !cmd)
785*4882a593Smuzhiyun 		return;
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 	data->host_cookie = COOKIE_UNMAPPED;
788*4882a593Smuzhiyun 
789*4882a593Smuzhiyun 	/* FIXME: looks like the DMA engine works only with CMD18 */
790*4882a593Smuzhiyun 	if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK
791*4882a593Smuzhiyun 			&& cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
792*4882a593Smuzhiyun 		return;
793*4882a593Smuzhiyun 	/*
794*4882a593Smuzhiyun 	 * We don't do DMA on "complex" transfers, i.e. with
795*4882a593Smuzhiyun 	 * non-word-aligned buffers or lengths. A future improvement
796*4882a593Smuzhiyun 	 * could be made to use temporary DMA bounce-buffers when these
797*4882a593Smuzhiyun 	 * requirements are not met.
798*4882a593Smuzhiyun 	 *
799*4882a593Smuzhiyun 	 * Also, we don't bother with all the DMA setup overhead for
800*4882a593Smuzhiyun 	 * short transfers.
801*4882a593Smuzhiyun 	 */
802*4882a593Smuzhiyun 	if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
803*4882a593Smuzhiyun 		return;
804*4882a593Smuzhiyun 
805*4882a593Smuzhiyun 	if (data->blksz & 3)
806*4882a593Smuzhiyun 		return;
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	for_each_sg(data->sg, sg, data->sg_len, i) {
809*4882a593Smuzhiyun 		if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
810*4882a593Smuzhiyun 			return;
811*4882a593Smuzhiyun 		if (sg->offset != 0)
812*4882a593Smuzhiyun 			return;
813*4882a593Smuzhiyun 	}
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun 	/* This data might be unmapped at this time */
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 	sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
818*4882a593Smuzhiyun 			    mmc_get_dma_dir(data));
819*4882a593Smuzhiyun 	if (sg_len)
820*4882a593Smuzhiyun 		data->host_cookie = COOKIE_MAPPED;
821*4882a593Smuzhiyun 
822*4882a593Smuzhiyun 	data->sg_count = sg_len;
823*4882a593Smuzhiyun }
824*4882a593Smuzhiyun 
alcor_post_req(struct mmc_host * mmc,struct mmc_request * mrq,int err)825*4882a593Smuzhiyun static void alcor_post_req(struct mmc_host *mmc,
826*4882a593Smuzhiyun 			    struct mmc_request *mrq,
827*4882a593Smuzhiyun 			    int err)
828*4882a593Smuzhiyun {
829*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
830*4882a593Smuzhiyun 	struct mmc_data *data = mrq->data;
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	if (!data)
833*4882a593Smuzhiyun 		return;
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun 	if (data->host_cookie == COOKIE_MAPPED) {
836*4882a593Smuzhiyun 		dma_unmap_sg(host->dev,
837*4882a593Smuzhiyun 			     data->sg,
838*4882a593Smuzhiyun 			     data->sg_len,
839*4882a593Smuzhiyun 			     mmc_get_dma_dir(data));
840*4882a593Smuzhiyun 	}
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun 	data->host_cookie = COOKIE_UNMAPPED;
843*4882a593Smuzhiyun }
844*4882a593Smuzhiyun 
/*
 * Apply the requested slot power state.
 *
 * NOTE(review): the original author states below that the order and
 * timing of these register writes is critical and was derived from
 * sniffing the vendor driver - do not reorder without hardware to
 * test on.
 */
static void alcor_set_power_mode(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct alcor_pci_priv *priv = host->alcor_pci;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		alcor_set_clock(host, ios->clock);
		/* set all pins to input */
		alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
		/* turn off VDD */
		alcor_write8(priv, 0, AU6601_POWER_CONTROL);
		break;
	case MMC_POWER_UP:
		/* nothing to do: the real bring-up happens at MMC_POWER_ON */
		break;
	case MMC_POWER_ON:
		/* This is the trickiest part. The order and timings of
		 * instructions seem to play an important role. Any changes
		 * may confuse the internal state engine of this HW.
		 * FIXME: If we ever get access to documentation, then this
		 * part should be reviewed again.
		 */

		/* enable SD card mode */
		alcor_write8(priv, AU6601_SD_CARD,
			      AU6601_ACTIVE_CTRL);
		/* set signal voltage to 3.3V */
		alcor_write8(priv, 0, AU6601_OPT);
		/* no documentation about clk delay, for now just try to mimic
		 * the original driver.
		 */
		alcor_write8(priv, 0x20, AU6601_CLK_DELAY);
		/* set BUS width to 1 bit */
		alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
		/* set CLK first time */
		alcor_set_clock(host, ios->clock);
		/* power on VDD */
		alcor_write8(priv, AU6601_SD_CARD,
			      AU6601_POWER_CONTROL);
		/* wait until the CLK gets stable */
		mdelay(20);
		/* set CLK again, mimic original driver. */
		alcor_set_clock(host, ios->clock);

		/* enable output */
		alcor_write8(priv, AU6601_SD_CARD,
			      AU6601_OUTPUT_ENABLE);
		/* The clk will not work on au6621. We need to trigger data
		 * transfer.
		 */
		alcor_write8(priv, AU6601_DATA_WRITE,
			      AU6601_DATA_XFER_CTRL);
		/* configure timeout. Not clear what exactly it means. */
		alcor_write8(priv, 0x7d, AU6601_TIME_OUT_CTRL);
		/* settle time, mimics the vendor driver - TODO confirm */
		mdelay(100);
		break;
	default:
		dev_err(host->dev, "Unknown power parameter\n");
	}
}
905*4882a593Smuzhiyun 
alcor_set_ios(struct mmc_host * mmc,struct mmc_ios * ios)906*4882a593Smuzhiyun static void alcor_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
907*4882a593Smuzhiyun {
908*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	mutex_lock(&host->cmd_mutex);
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun 	dev_dbg(host->dev, "set ios. bus width: %x, power mode: %x\n",
913*4882a593Smuzhiyun 		ios->bus_width, ios->power_mode);
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun 	if (ios->power_mode != host->cur_power_mode) {
916*4882a593Smuzhiyun 		alcor_set_power_mode(mmc, ios);
917*4882a593Smuzhiyun 		host->cur_power_mode = ios->power_mode;
918*4882a593Smuzhiyun 	} else {
919*4882a593Smuzhiyun 		alcor_set_timing(mmc, ios);
920*4882a593Smuzhiyun 		alcor_set_bus_width(mmc, ios);
921*4882a593Smuzhiyun 		alcor_set_clock(host, ios->clock);
922*4882a593Smuzhiyun 	}
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 	mutex_unlock(&host->cmd_mutex);
925*4882a593Smuzhiyun }
926*4882a593Smuzhiyun 
alcor_signal_voltage_switch(struct mmc_host * mmc,struct mmc_ios * ios)927*4882a593Smuzhiyun static int alcor_signal_voltage_switch(struct mmc_host *mmc,
928*4882a593Smuzhiyun 				       struct mmc_ios *ios)
929*4882a593Smuzhiyun {
930*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	mutex_lock(&host->cmd_mutex);
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun 	switch (ios->signal_voltage) {
935*4882a593Smuzhiyun 	case MMC_SIGNAL_VOLTAGE_330:
936*4882a593Smuzhiyun 		alcor_rmw8(host, AU6601_OPT, AU6601_OPT_SD_18V, 0);
937*4882a593Smuzhiyun 		break;
938*4882a593Smuzhiyun 	case MMC_SIGNAL_VOLTAGE_180:
939*4882a593Smuzhiyun 		alcor_rmw8(host, AU6601_OPT, 0, AU6601_OPT_SD_18V);
940*4882a593Smuzhiyun 		break;
941*4882a593Smuzhiyun 	default:
942*4882a593Smuzhiyun 		/* No signal voltage switch required */
943*4882a593Smuzhiyun 		break;
944*4882a593Smuzhiyun 	}
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun 	mutex_unlock(&host->cmd_mutex);
947*4882a593Smuzhiyun 	return 0;
948*4882a593Smuzhiyun }
949*4882a593Smuzhiyun 
/* mmc_host_ops for the AU6601/AU6621 SD/MMC controller. */
static const struct mmc_host_ops alcor_sdc_ops = {
	.card_busy	= alcor_card_busy,
	.get_cd		= alcor_get_cd,
	.get_ro		= alcor_get_ro,
	.post_req	= alcor_post_req,
	.pre_req	= alcor_pre_req,
	.request	= alcor_request,
	.set_ios	= alcor_set_ios,
	.start_signal_voltage_switch = alcor_signal_voltage_switch,
};
960*4882a593Smuzhiyun 
alcor_timeout_timer(struct work_struct * work)961*4882a593Smuzhiyun static void alcor_timeout_timer(struct work_struct *work)
962*4882a593Smuzhiyun {
963*4882a593Smuzhiyun 	struct delayed_work *d = to_delayed_work(work);
964*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = container_of(d, struct alcor_sdmmc_host,
965*4882a593Smuzhiyun 						timeout_work);
966*4882a593Smuzhiyun 	mutex_lock(&host->cmd_mutex);
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	dev_dbg(host->dev, "triggered timeout\n");
969*4882a593Smuzhiyun 	if (host->mrq) {
970*4882a593Smuzhiyun 		dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 		if (host->data) {
973*4882a593Smuzhiyun 			host->data->error = -ETIMEDOUT;
974*4882a593Smuzhiyun 		} else {
975*4882a593Smuzhiyun 			if (host->cmd)
976*4882a593Smuzhiyun 				host->cmd->error = -ETIMEDOUT;
977*4882a593Smuzhiyun 			else
978*4882a593Smuzhiyun 				host->mrq->cmd->error = -ETIMEDOUT;
979*4882a593Smuzhiyun 		}
980*4882a593Smuzhiyun 
981*4882a593Smuzhiyun 		alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
982*4882a593Smuzhiyun 		alcor_request_complete(host, 0);
983*4882a593Smuzhiyun 	}
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 	mutex_unlock(&host->cmd_mutex);
986*4882a593Smuzhiyun }
987*4882a593Smuzhiyun 
/*
 * Bring the controller into a known idle state and enable SD IRQs.
 *
 * NOTE(review): as the FIXME below says, this sequence mimics the
 * vendor driver; register semantics are largely undocumented, so do
 * not reorder the writes without hardware to test on.
 */
static void alcor_hw_init(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct alcor_dev_cfg *cfg = priv->cfg;

	/* FIXME: This part is a mimics HW init of original driver.
	 * If we will ever get access to documentation, then this part
	 * should be reviewed again.
	 */

	/* reset command state engine */
	alcor_reset(host, AU6601_RESET_CMD);

	alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
	/* enable sd card mode */
	alcor_write8(priv, AU6601_SD_CARD, AU6601_ACTIVE_CTRL);

	/* set BUS width to 1 bit */
	alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);

	/* reset data state engine */
	alcor_reset(host, AU6601_RESET_DATA);
	/* Not sure if a voodoo with AU6601_DMA_BOUNDARY is really needed */
	alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);

	alcor_write8(priv, 0, AU6601_INTERFACE_MODE_CTRL);
	/* not clear what we are doing here. */
	alcor_write8(priv, 0x44, AU6601_PAD_DRIVE0);
	alcor_write8(priv, 0x44, AU6601_PAD_DRIVE1);
	alcor_write8(priv, 0x00, AU6601_PAD_DRIVE2);

	/* for 6601 - dma_boundary; for 6621 - dma_page_cnt
	 * exact meaning of this register is not clear.
	 */
	alcor_write8(priv, cfg->dma, AU6601_DMA_BOUNDARY);

	/* make sure all pins are set to input and VDD is off */
	alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
	alcor_write8(priv, 0, AU6601_POWER_CONTROL);

	/* enable card-detect reporting */
	alcor_write8(priv, AU6601_DETECT_EN, AU6601_DETECT_STATUS);
	/* now we should be safe to enable IRQs */
	alcor_unmask_sd_irqs(host);
}
1032*4882a593Smuzhiyun 
/*
 * Quiesce the controller: mask SD interrupts, reset both state
 * engines, disable card-detect, float all pins and cut VDD.
 * Used on remove and system suspend; alcor_hw_init() reverses it.
 */
static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	alcor_mask_sd_irqs(host);
	alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

	/* disable card-detect reporting */
	alcor_write8(priv, 0, AU6601_DETECT_STATUS);

	/* set all pins to input and turn off VDD */
	alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
	alcor_write8(priv, 0, AU6601_POWER_CONTROL);

	/* clear OPT bits, i.e. back to 3.3V signalling */
	alcor_write8(priv, 0, AU6601_OPT);
}
1047*4882a593Smuzhiyun 
/*
 * One-time mmc_host capability setup: clock range, 3.3V OCR, 4-bit bus
 * and UHS modes, plus the DMA transfer geometry limits.
 */
static void alcor_init_mmc(struct alcor_sdmmc_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);

	mmc->f_min = AU6601_MIN_CLOCK;
	mmc->f_max = AU6601_MAX_CLOCK;
	mmc->ocr_avail = MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED
		| MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
		| MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50;
	mmc->caps2 = MMC_CAP2_NO_SDIO;
	mmc->ops = &alcor_sdc_ops;

	/* The hardware does DMA data transfer of 4096 bytes to/from a single
	 * buffer address. Scatterlists are not supported at the hardware
	 * level, however we can work with them at the driver level,
	 * provided that each segment is exactly 4096 bytes in size.
	 * Upon DMA completion of a single segment (signalled via IRQ), we
	 * immediately proceed to transfer the next segment from the
	 * scatterlist.
	 *
	 * The overall request is limited to 240 sectors, matching the
	 * original vendor driver.
	 */
	mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
	mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
	mmc->max_blk_count = 240;
	/* max_blk_size is not set here - relies on the mmc-core default;
	 * TODO(review): confirm this is intentional.
	 */
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
	dma_set_max_seg_size(host->dev, mmc->max_seg_size);
}
1078*4882a593Smuzhiyun 
alcor_pci_sdmmc_drv_probe(struct platform_device * pdev)1079*4882a593Smuzhiyun static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
1080*4882a593Smuzhiyun {
1081*4882a593Smuzhiyun 	struct alcor_pci_priv *priv = pdev->dev.platform_data;
1082*4882a593Smuzhiyun 	struct mmc_host *mmc;
1083*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host;
1084*4882a593Smuzhiyun 	int ret;
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1087*4882a593Smuzhiyun 	if (!mmc) {
1088*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Can't allocate MMC\n");
1089*4882a593Smuzhiyun 		return -ENOMEM;
1090*4882a593Smuzhiyun 	}
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	host = mmc_priv(mmc);
1093*4882a593Smuzhiyun 	host->dev = &pdev->dev;
1094*4882a593Smuzhiyun 	host->cur_power_mode = MMC_POWER_UNDEFINED;
1095*4882a593Smuzhiyun 	host->alcor_pci = priv;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	/* make sure irqs are disabled */
1098*4882a593Smuzhiyun 	alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
1099*4882a593Smuzhiyun 	alcor_write32(priv, 0, AU6601_MS_INT_ENABLE);
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
1102*4882a593Smuzhiyun 			alcor_irq, alcor_irq_thread, IRQF_SHARED,
1103*4882a593Smuzhiyun 			DRV_NAME_ALCOR_PCI_SDMMC, host);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 	if (ret) {
1106*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to get irq for data line\n");
1107*4882a593Smuzhiyun 		goto free_host;
1108*4882a593Smuzhiyun 	}
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 	mutex_init(&host->cmd_mutex);
1111*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer);
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	alcor_init_mmc(host);
1114*4882a593Smuzhiyun 	alcor_hw_init(host);
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	dev_set_drvdata(&pdev->dev, host);
1117*4882a593Smuzhiyun 	mmc_add_host(mmc);
1118*4882a593Smuzhiyun 	return 0;
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun free_host:
1121*4882a593Smuzhiyun 	mmc_free_host(mmc);
1122*4882a593Smuzhiyun 	return ret;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun 
/*
 * Remove: cancel any pending timeout work (completing the in-flight
 * request if one was queued), quiesce the hardware and unregister the
 * mmc host.
 */
static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
	struct mmc_host *mmc = mmc_from_priv(host);

	/* a cancelled timeout work implies a request was still in flight */
	if (cancel_delayed_work_sync(&host->timeout_work))
		alcor_request_complete(host, 0);

	alcor_hw_uninit(host);
	mmc_remove_host(mmc);
	mmc_free_host(mmc);

	return 0;
}
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
alcor_pci_sdmmc_suspend(struct device * dev)1141*4882a593Smuzhiyun static int alcor_pci_sdmmc_suspend(struct device *dev)
1142*4882a593Smuzhiyun {
1143*4882a593Smuzhiyun 	struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	if (cancel_delayed_work_sync(&host->timeout_work))
1146*4882a593Smuzhiyun 		alcor_request_complete(host, 0);
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	alcor_hw_uninit(host);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	return 0;
1151*4882a593Smuzhiyun }
1152*4882a593Smuzhiyun 
/*
 * System-sleep resume hook: reprogram the controller from scratch;
 * SD IRQs are re-enabled at the end of alcor_hw_init().
 */
static int alcor_pci_sdmmc_resume(struct device *dev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(dev);

	alcor_hw_init(host);
	return 0;
}
1161*4882a593Smuzhiyun #endif /* CONFIG_PM_SLEEP */
1162*4882a593Smuzhiyun 
/* System sleep PM ops (effective only when CONFIG_PM_SLEEP is set). */
static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
			 alcor_pci_sdmmc_resume);

/* Matched against the platform device created by the alcor_pci parent. */
static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
	{
		.name = DRV_NAME_ALCOR_PCI_SDMMC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids);

static struct platform_driver alcor_pci_sdmmc_driver = {
	.probe		= alcor_pci_sdmmc_drv_probe,
	.remove		= alcor_pci_sdmmc_drv_remove,
	.id_table	= alcor_pci_sdmmc_ids,
	.driver		= {
		.name	= DRV_NAME_ALCOR_PCI_SDMMC,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= &alcor_mmc_pm_ops
	},
};
module_platform_driver(alcor_pci_sdmmc_driver);

MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface");
MODULE_LICENSE("GPL");
1190