/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 * Authors:
 *   David Daney <david.daney@cavium.com>
 *   Peter Swain <pswain@cavium.com>
 *   Steven J. Hill <steven.hill@cavium.com>
 *   Jan Glauber <jglauber@cavium.com>
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>

#include "cavium.h"

const char *cvm_mmc_irq_names[] = {
	"MMC Buffer",
	"MMC Command",
	"MMC DMA",
	"MMC Command Error",
	"MMC DMA Error",
	"MMC Switch",
	"MMC Switch Error",
	"MMC DMA int Fifo",
	"MMC DMA int",
};

/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types.  These are correct if MMC devices are
 * being used.  However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type.  We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
 */
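/*
 * For example, SD CMD8 (SEND_IF_COND) is a bcr command with an R7
 * response (ctype 0, rtype 1), while the table below encodes the MMC
 * CMD8 (SEND_EXT_CSD), an adtc read with an R1 response (ctype 1,
 * rtype 1).  The resulting modifiers are ctype_xor = 0 ^ 1 = 1 and
 * rtype_xor = 1 ^ 1 = 0.
 */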
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
	{0, 0},		/* CMD0 */
	{0, 3},		/* CMD1 */
	{0, 2},		/* CMD2 */
	{0, 1},		/* CMD3 */
	{0, 0},		/* CMD4 */
	{0, 1},		/* CMD5 */
	{0, 1},		/* CMD6 */
	{0, 1},		/* CMD7 */
	{1, 1},		/* CMD8 */
	{0, 2},		/* CMD9 */
	{0, 2},		/* CMD10 */
	{1, 1},		/* CMD11 */
	{0, 1},		/* CMD12 */
	{0, 1},		/* CMD13 */
	{1, 1},		/* CMD14 */
	{0, 0},		/* CMD15 */
	{0, 1},		/* CMD16 */
	{1, 1},		/* CMD17 */
	{1, 1},		/* CMD18 */
	{3, 1},		/* CMD19 */
	{2, 1},		/* CMD20 */
	{0, 0},		/* CMD21 */
	{0, 0},		/* CMD22 */
	{0, 1},		/* CMD23 */
	{2, 1},		/* CMD24 */
	{2, 1},		/* CMD25 */
	{2, 1},		/* CMD26 */
	{2, 1},		/* CMD27 */
	{0, 1},		/* CMD28 */
	{0, 1},		/* CMD29 */
	{1, 1},		/* CMD30 */
	{1, 1},		/* CMD31 */
	{0, 0},		/* CMD32 */
	{0, 0},		/* CMD33 */
	{0, 0},		/* CMD34 */
	{0, 1},		/* CMD35 */
	{0, 1},		/* CMD36 */
	{0, 0},		/* CMD37 */
	{0, 1},		/* CMD38 */
	{0, 4},		/* CMD39 */
	{0, 5},		/* CMD40 */
	{0, 0},		/* CMD41 */
	{2, 1},		/* CMD42 */
	{0, 0},		/* CMD43 */
	{0, 0},		/* CMD44 */
	{0, 0},		/* CMD45 */
	{0, 0},		/* CMD46 */
	{0, 0},		/* CMD47 */
	{0, 0},		/* CMD48 */
	{0, 0},		/* CMD49 */
	{0, 0},		/* CMD50 */
	{0, 0},		/* CMD51 */
	{0, 0},		/* CMD52 */
	{0, 0},		/* CMD53 */
	{0, 0},		/* CMD54 */
	{0, 1},		/* CMD55 */
	{0xff, 0xff},	/* CMD56 */
	{0, 0},		/* CMD57 */
	{0, 0},		/* CMD58 */
	{0, 0},		/* CMD59 */
	{0, 0},		/* CMD60 */
	{0, 0},		/* CMD61 */
	{0, 0},		/* CMD62 */
	{0, 0}		/* CMD63 */
};

static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
	struct cvm_mmc_cr_type *cr;
	u8 hardware_ctype, hardware_rtype;
	u8 desired_ctype = 0, desired_rtype = 0;
	struct cvm_mmc_cr_mods r;

	cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
	hardware_ctype = cr->ctype;
	hardware_rtype = cr->rtype;
	if (cmd->opcode == MMC_GEN_CMD)
		hardware_ctype = (cmd->arg & 1) ? 1 : 2;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_ADTC:
		desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
		break;
	case MMC_CMD_AC:
	case MMC_CMD_BC:
	case MMC_CMD_BCR:
		desired_ctype = 0;
		break;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		desired_rtype = 0;
		break;
	case MMC_RSP_R1: /* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
	case MMC_RSP_R1B:
		desired_rtype = 1;
		break;
	case MMC_RSP_R2:
		desired_rtype = 2;
		break;
	case MMC_RSP_R3: /* MMC_RSP_R4 */
		desired_rtype = 3;
		break;
	}
	r.ctype_xor = desired_ctype ^ hardware_ctype;
	r.rtype_xor = desired_rtype ^ hardware_rtype;
	return r;
}

static void check_switch_errors(struct cvm_mmc_host *host)
{
	u64 emm_switch;

	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
	if (emm_switch & MIO_EMM_SWITCH_ERR0)
		dev_err(host->dev, "Switch power class error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR1)
		dev_err(host->dev, "Switch hs timing error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR2)
		dev_err(host->dev, "Switch bus width error\n");
}

static void clear_bus_id(u64 *reg)
{
	u64 bus_id_mask = GENMASK_ULL(61, 60);

	*reg &= ~bus_id_mask;
}

static void set_bus_id(u64 *reg, int bus_id)
{
	clear_bus_id(reg);
	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}

static int get_bus_id(u64 reg)
{
	return FIELD_GET(GENMASK_ULL(61, 60), reg);
}

/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
	int retries = 100;
	u64 rsp_sts;
	int bus_id;

	/*
	 * Mode settings are only taken from slot 0. Work around that
	 * hardware issue by first switching to slot 0.
	 */
	bus_id = get_bus_id(emm_switch);
	clear_bus_id(&emm_switch);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	set_bus_id(&emm_switch, bus_id);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* wait for the switch to finish */
	do {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
			break;
		udelay(10);
	} while (--retries);

	check_switch_errors(host);
}

static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
	/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
	u64 match = 0x3001070fffffffffull;

	return (slot->cached_switch & match) != (new_val & match);
}

static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
	u64 timeout;

	if (!slot->clock)
		return;

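	/*
	 * The watchdog is programmed in eMMC clock cycles: use the
	 * requested timeout if one was given, otherwise default to
	 * roughly 850 ms worth of cycles.
	 */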
	if (ns)
		timeout = (slot->clock * ns) / NSEC_PER_SEC;
	else
		timeout = (slot->clock * 850ull) / 1000ull;
	writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}

static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch, wdog;

	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
	emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
			MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
	set_bus_id(&emm_switch, slot->bus_id);

	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
	do_switch(slot->host, emm_switch);

	slot->cached_switch = emm_switch;

	msleep(20);

	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}

/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	if (slot->bus_id == host->last_slot)
		return;

	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}

static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;
	u64 dat = 0;

	/* Auto inc from offset zero */
	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

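	/*
	 * BUF_DAT returns 64-bit words with the earliest byte of the
	 * transfer in the most significant position; unpack them
	 * byte by byte into the scatterlist.
	 */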
	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}

static void do_write(struct mmc_request *req)
{
	req->data->bytes_xfered = req->data->blocks * req->data->blksz;
	req->data->error = 0;
}

static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
			     u64 rsp_sts)
{
	u64 rsp_hi, rsp_lo;

	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
		return;

	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

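	/*
	 * Types 1 and 3 are short (48-bit) responses whose 32 payload
	 * bits sit in RSP_LO[39:8].  Type 2 is the long 136-bit (R2)
	 * response spread across RSP_LO and RSP_HI.
	 */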
	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
	case 1:
	case 3:
		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
		req->cmd->resp[1] = 0;
		req->cmd->resp[2] = 0;
		req->cmd->resp[3] = 0;
		break;
	case 2:
		req->cmd->resp[3] = rsp_lo & 0xffffffff;
		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
		req->cmd->resp[1] = rsp_hi & 0xffffffff;
		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
		break;
	}
}

static int get_dma_dir(struct mmc_data *data)
{
	return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* Check if there are any pending requests left */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* Clear and disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return finish_dma_sg(host, data);
	else
		return finish_dma_single(host, data);
}

static int check_status(u64 rsp_sts)
{
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
	    rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
		return -EILSEQ;
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
		return -ETIMEDOUT;
	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
		return -EIO;
	return 0;
}

/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma;

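	/*
	 * Re-issue the DMA with DAT_NULL set, which should let the
	 * engine terminate without transferring any more data.
	 */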
	emm_dma = readq(host->base + MIO_EMM_DMA(host));
	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}

irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	unsigned long flags = 0;
	u64 emm_int, rsp_sts;
	bool host_done;

	if (host->need_irq_handler_lock)
		spin_lock_irqsave(&host->irq_handler_lock, flags);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * dma_val set means DMA is still in progress. Don't touch
	 * the request and wait for the interrupt indicating that
	 * the DMA is finished.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
		unsigned int type = (rsp_sts >> 7) & 3;

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}

	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
		    emm_int & MIO_EMM_INT_DMA_DONE ||
		    emm_int & MIO_EMM_INT_CMD_ERR  ||
		    emm_int & MIO_EMM_INT_DMA_ERR;

	if (!(host_done && req->done))
		goto no_req_done;

	req->cmd->error = check_status(rsp_sts);

	if (host->dma_active && req->data)
		if (!finish_dma(host, req->data))
			goto no_req_done;

	set_cmd_response(host, req, rsp_sts);
	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
		cleanup_dma(host, rsp_sts);

	host->current_req = NULL;
	req->done(req);

no_req_done:
	if (host->dmar_fixup_done)
		host->dmar_fixup_done(host);
	if (host_done)
		host->release_bus(host);
out:
	if (host->need_irq_handler_lock)
		spin_unlock_irqrestore(&host->irq_handler_lock, flags);
	else
		__release(&host->irq_handler_lock);
	return IRQ_RETVAL(emm_int != 0);
}

/*
 * Program DMA_CFG and if needed DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 dma_cfg, addr;
	int count, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;

	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
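	/* The SIZE field counts 64-bit words minus one. */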
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
			      (sg_dma_len(&data->sg[0]) / 8) - 1);

	addr = sg_dma_address(&data->sg[0]);
	if (!host->big_dma_addr)
		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

	if (host->big_dma_addr)
		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
	return addr;
}

/*
 * Queue complete sg list into the FIFO.
 * Returns 0 on error, 1 otherwise.
 */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	struct scatterlist *sg;
	u64 fifo_cmd, addr;
	int count, i, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;
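	/* The DMA FIFO can queue at most 16 entries; reject longer lists. */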
	if (count > 16)
		goto error;

	/* Enable FIFO by removing CLR bit */
	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	for_each_sg(data->sg, sg, count, i) {
		/* Program DMA address */
		addr = sg_dma_address(sg);
		if (addr & 7)
			goto error;
		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

		/*
		 * If we have scatter-gather support we also have an extra
		 * register for the DMA addr, so no need to check
		 * host->big_dma_addr here.
		 */
		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

		/* enable interrupts on the last element */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
				       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
				       sg_dma_len(sg) / 8 - 1);
		/*
		 * The write copies the address and the command to the FIFO
		 * and increments the FIFO's COUNT field.
		 */
		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
		pr_debug("[%s] sg_dma_len: %u  sg_elem: %d/%d\n",
			 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
	}

	/*
	 * Unlike prepare_dma_single, we don't return the address here,
	 * as it would not make sense for scatter-gather. The DMA fixup
	 * is only required on models that don't support scatter-gather,
	 * so that is not a problem.
	 */
	return 1;

error:
	WARN_ON_ONCE(1);
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	/* Disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	return 0;
}

static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return prepare_dma_sg(host, data);
	else
		return prepare_dma_single(host, data);
}

static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	u64 emm_dma;

	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
			     mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_RW,
			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
	set_bus_id(&emm_dma, slot->bus_id);

	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

	pr_debug("[%s] blocks: %u  multi: %d\n",
		(emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
	return emm_dma;
}

static void cvm_mmc_dma_request(struct mmc_host *mmc,
				struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_data *data;
	u64 emm_dma, addr;

	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
		dev_err(&mmc->card->dev,
			"Error: cvm_mmc_dma_request no data\n");
		goto error;
	}

	cvm_mmc_switch_to(slot);

	data = mrq->data;
	pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
		 data->blocks, data->blksz, data->blocks * data->blksz);
	if (data->timeout_ns)
		set_wdog(slot, data->timeout_ns);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	emm_dma = prepare_ext_dma(mmc, mrq);
	addr = prepare_dma(host, data);
	if (!addr) {
		dev_err(host->dev, "prepare_dma failed\n");
		goto error;
	}

	host->dma_active = true;
	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
			 MIO_EMM_INT_DMA_ERR);

	if (host->dmar_fixup)
		host->dmar_fixup(host, mrq->cmd, data, addr);

	/*
	 * If we have a valid SD card in the slot, we set the response
	 * bit mask to check for CRC errors and timeouts only.
	 * Otherwise, use the default power reset value.
	 */
	if (mmc_card_sd(mmc->card))
		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
	else
		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
	return;

error:
	mrq->cmd->error = -EINVAL;
	if (mrq->done)
		mrq->done(mrq);
	host->release_bus(host);
}

static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}

static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
	struct sg_mapping_iter *smi = &host->smi;
	unsigned int bytes_xfered;
	int shift = 56;
	u64 dat = 0;

	/* Copy data to the xmit buffer before issuing the command. */
	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

	/* Auto inc from offset zero, dbuf zero */
	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

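	/*
	 * Pack bytes most-significant-first into 64-bit words and push
	 * each completed word to the transmit buffer.
	 */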
	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}

		if (shift < 0) {
			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
			dat = 0;
		}
	}
	sg_miter_stop(smi);
}

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_command *cmd = mrq->cmd;
	struct cvm_mmc_cr_mods mods;
	u64 emm_cmd, rsp_sts;
	int retries = 100;

	/*
	 * Note about locking:
	 * All MMC devices share the same bus and controller. Allow only a
	 * single user of the bootbus/MMC bus at a time. The lock is acquired
	 * on all entry points from the MMC layer.
	 *
	 * For requests the lock is only released after the completion
	 * interrupt!
	 */
	host->acquire_bus(host);

	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		return cvm_mmc_dma_request(mmc, mrq);

	cvm_mmc_switch_to(slot);

	mods = cvm_mmc_get_cr_mods(cmd);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_READ)
			do_read_request(host, mrq);
		else
			do_write_request(host, mrq);

		if (cmd->data->timeout_ns)
			set_wdog(slot, cmd->data->timeout_ns);
	} else
		set_wdog(slot, 0);

	host->dma_active = false;
	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
	set_bus_id(&emm_cmd, slot->bus_id);
	if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

	writeq(0, host->base + MIO_EMM_STS_MASK(host));

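	/*
	 * Wait up to ~1 ms (100 x 10 us) for any pending command,
	 * switch or DMA activity to drain before issuing the new
	 * command.
	 */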
retry:
	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
		udelay(10);
		if (--retries)
			goto retry;
	}
	if (!retries)
		dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}

static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	int clk_period = 0, power_class = 10, bus_width = 0;
	u64 clock, emm_switch;

	host->acquire_bus(host);
	cvm_mmc_switch_to(slot);

	/* Set the power state */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;

	case MMC_POWER_OFF:
		cvm_mmc_reset_bus(slot);
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 0);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;

	case MMC_POWER_UP:
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 1);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	}

	/* Convert bus width to HW definition */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		bus_width = 2;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = 1;
		break;
	case MMC_BUS_WIDTH_1:
		bus_width = 0;
		break;
	}

	/* DDR is available for 4/8 bit bus width */
	if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
		bus_width |= 4;

	/* Change the clock frequency. */
	clock = ios->clock;
	if (clock > 52000000)
		clock = 52000000;
	slot->clock = clock;

	if (clock)
		clk_period = (host->sys_freq + clock - 1) / (2 * clock);

	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
				(ios->timing == MMC_TIMING_MMC_HS)) |
		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
	set_bus_id(&emm_switch, slot->bus_id);

	if (!switch_val_changed(slot, emm_switch))
		goto out;

	set_wdog(slot, 0);
	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;
out:
	host->release_bus(host);
}

static const struct mmc_host_ops cvm_mmc_ops = {
	.request	= cvm_mmc_request,
	.set_ios	= cvm_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
};

static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
	struct mmc_host *mmc = slot->mmc;

	clock = min(clock, mmc->f_max);
	clock = max(clock, mmc->f_min);
	slot->clock = clock;
}

static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch;

	/* Enable this bus slot. */
	host->emm_cfg |= (1ull << slot->bus_id);
	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
	udelay(10);

	/* Program initial clock speed and power. */
	cvm_mmc_set_clock(slot, slot->mmc->f_min);
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
				 (host->sys_freq / slot->clock) / 2);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
				 (host->sys_freq / slot->clock) / 2);

	/* Make the changes take effect on this bus slot. */
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	slot->cached_switch = emm_switch;

	/*
	 * Set watchdog timeout value and default reset value
	 * for the mask register. Finally, set the CARD_RCA
	 * bit so that we can get the card address relative
	 * to the CMD register for CMD7 transactions.
	 */
	set_wdog(slot, 0);
	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(1, host->base + MIO_EMM_RCA(host));
	return 0;
}

static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
	struct device_node *node = dev->of_node;
	struct mmc_host *mmc = slot->mmc;
	u64 clock_period;
	int ret;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
		return ret;
	}

	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
		dev_err(dev, "Invalid reg property on %pOF\n", node);
		return -EINVAL;
	}

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;
	/*
	 * Legacy Octeon firmware has no regulator entry; fall back to
	 * a hard-coded voltage to get a sane OCR.
	 */
	if (IS_ERR(mmc->supply.vmmc))
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Common MMC bindings */
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	/* Set bus width */
	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
		if (bus_width == 8)
			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
		else if (bus_width == 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	/* Set maximum and minimum frequency */
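	/*
	 * mmc_of_parse() fills in f_max from the "max-frequency"
	 * property; if that is absent, fall back to the legacy
	 * "spi-max-frequency" binding.
	 */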
	if (!mmc->f_max)
		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
	if (!mmc->f_max || mmc->f_max > 52000000)
		mmc->f_max = 52000000;
	mmc->f_min = 400000;

	/* Sampling register settings, period in picoseconds */
	clock_period = 1000000000000ull / slot->host->sys_freq;
	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

	return id;
}

int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
	struct cvm_mmc_slot *slot;
	struct mmc_host *mmc;
	int ret, id;

	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;

	ret = cvm_mmc_of_parse(dev, slot);
	if (ret < 0)
		goto error;
	id = ret;

	/* Set up host parameters */
	mmc->ops = &cvm_mmc_ops;

	/*
	 * We only have a 3.3V supply, so we cannot support any
	 * of the UHS modes. We do support the high speed DDR
	 * modes up to 52 MHz.
	 *
	 * Disable bounce buffers for max_segs = 1
	 */
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

	if (host->use_sg)
		mmc->max_segs = 16;
	else
		mmc->max_segs = 1;

	/* DMA size field can address up to 8 MB */
	mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
				  dma_get_max_seg_size(host->dev));
	mmc->max_req_size = mmc->max_seg_size;
	/* External DMA is in 512 byte blocks */
	mmc->max_blk_size = 512;
	/* DMA block count field is 15 bits */
	mmc->max_blk_count = 32767;

	slot->clock = mmc->f_min;
	slot->bus_id = id;
	slot->cached_rca = 1;

	host->acquire_bus(host);
	host->slot[id] = slot;
	cvm_mmc_switch_to(slot);
	cvm_mmc_init_lowlevel(slot);
	host->release_bus(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(dev, "mmc_add_host() returned %d\n", ret);
		slot->host->slot[id] = NULL;
		goto error;
	}
	return 0;

error:
	mmc_free_host(slot->mmc);
	return ret;
}

int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
	mmc_remove_host(slot->mmc);
	slot->host->slot[slot->bus_id] = NULL;
	mmc_free_host(slot->mmc);
	return 0;
}
1087